NEX-18203 fcp should call mdi_pi_offline() without NDI_DEVI_REMOVE
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
NEX-17944 HBA drivers don't need the redundant devfs_clean step
Reviewed by: Dan Fields <dan.fields@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
NEX-3153 fcp unable to offline paths to drives behind ATTO Fibrebridge 6500
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Dan Fields <dan.fields@nexenta.com>
--- old/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
+++ new/usr/src/uts/common/io/fibre-channel/ulp/fcp.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 +
21 22 /*
22 23 * Copyright (c) 1999, 2010, Oracle and/or its affiliates. All rights reserved.
23 - *
24 + */
25 +
26 +/*
27 + * Copyright 2018 Nexenta Systems, Inc.
28 + */
29 +
30 +/*
24 31 * Fibre Channel SCSI ULP Mapping driver
25 32 */
26 33
27 34 #include <sys/scsi/scsi.h>
28 35 #include <sys/types.h>
29 36 #include <sys/varargs.h>
30 37 #include <sys/devctl.h>
31 38 #include <sys/thread.h>
32 39 #include <sys/thread.h>
33 40 #include <sys/open.h>
34 41 #include <sys/file.h>
35 42 #include <sys/sunndi.h>
36 43 #include <sys/console.h>
37 44 #include <sys/proc.h>
38 45 #include <sys/time.h>
39 46 #include <sys/utsname.h>
40 47 #include <sys/scsi/impl/scsi_reset_notify.h>
41 48 #include <sys/ndi_impldefs.h>
42 49 #include <sys/byteorder.h>
43 -#include <sys/fs/dv_node.h>
44 50 #include <sys/ctype.h>
45 51 #include <sys/sunmdi.h>
46 52
47 53 #include <sys/fibre-channel/fc.h>
48 54 #include <sys/fibre-channel/impl/fc_ulpif.h>
49 55 #include <sys/fibre-channel/ulp/fcpvar.h>
50 56
51 57 /*
52 58 * Discovery Process
53 59 * =================
54 60 *
55 61 * The discovery process is a major function of FCP. In order to help
56 62 * understand that function a flow diagram is given here. This diagram
57 63 * doesn't claim to cover all the cases and the events that can occur during
58 64 * the discovery process nor the subtleties of the code. The code paths shown
  59  65 * are simplified. Its purpose is to help the reader (and potential bug
  60  66 * fixer) have an overall view of the logic of the code. For that reason the
  61  67 * diagram covers the simple case of the line coming up cleanly or of a new
  62  68 * port attaching to FCP while the link is up. The reader must keep in mind
63 69 * that:
64 70 *
65 71 * - There are special cases where bringing devices online and offline
66 72 * is driven by Ioctl.
67 73 *
68 74 * - The behavior of the discovery process can be modified through the
69 75 * .conf file.
70 76 *
71 77 * - The line can go down and come back up at any time during the
72 78 * discovery process which explains some of the complexity of the code.
73 79 *
74 80 * ............................................................................
75 81 *
76 82 * STEP 1: The line comes up or a new Fibre Channel port attaches to FCP.
77 83 *
78 84 *
79 85 * +-------------------------+
80 86 * fp/fctl module --->| fcp_port_attach |
81 87 * +-------------------------+
82 88 * | |
83 89 * | |
84 90 * | v
85 91 * | +-------------------------+
86 92 * | | fcp_handle_port_attach |
87 93 * | +-------------------------+
88 94 * | |
89 95 * | |
90 96 * +--------------------+ |
91 97 * | |
92 98 * v v
93 99 * +-------------------------+
94 100 * | fcp_statec_callback |
95 101 * +-------------------------+
96 102 * |
97 103 * |
98 104 * v
99 105 * +-------------------------+
100 106 * | fcp_handle_devices |
101 107 * +-------------------------+
102 108 * |
103 109 * |
104 110 * v
105 111 * +-------------------------+
106 112 * | fcp_handle_mapflags |
107 113 * +-------------------------+
108 114 * |
109 115 * |
110 116 * v
111 117 * +-------------------------+
112 118 * | fcp_send_els |
113 119 * | |
114 120 * | PLOGI or PRLI To all the|
115 121 * | reachable devices. |
116 122 * +-------------------------+
117 123 *
118 124 *
119 125 * ............................................................................
120 126 *
121 127 * STEP 2: The callback functions of the PLOGI and/or PRLI requests sent during
122 128 * STEP 1 are called (it is actually the same function).
123 129 *
124 130 *
125 131 * +-------------------------+
126 132 * | fcp_icmd_callback |
127 133 * fp/fctl module --->| |
128 134 * | callback for PLOGI and |
129 135 * | PRLI. |
130 136 * +-------------------------+
131 137 * |
132 138 * |
133 139 * Received PLOGI Accept /-\ Received PRLI Accept
134 140 * _ _ _ _ _ _ / \_ _ _ _ _ _
135 141 * | \ / |
136 142 * | \-/ |
137 143 * | |
138 144 * v v
139 145 * +-------------------------+ +-------------------------+
140 146 * | fcp_send_els | | fcp_send_scsi |
141 147 * | | | |
142 148 * | PRLI | | REPORT_LUN |
143 149 * +-------------------------+ +-------------------------+
144 150 *
145 151 * ............................................................................
146 152 *
147 153 * STEP 3: The callback functions of the SCSI commands issued by FCP are called
148 154 * (It is actually the same function).
149 155 *
150 156 *
151 157 * +-------------------------+
152 158 * fp/fctl module ------->| fcp_scsi_callback |
153 159 * +-------------------------+
154 160 * |
155 161 * |
156 162 * |
157 163 * Receive REPORT_LUN reply /-\ Receive INQUIRY PAGE83 reply
158 164 * _ _ _ _ _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ _ _ _ _
159 165 * | \ / |
160 166 * | \-/ |
161 167 * | | |
162 168 * | Receive INQUIRY reply| |
163 169 * | | |
164 170 * v v v
165 171 * +------------------------+ +----------------------+ +----------------------+
166 172 * | fcp_handle_reportlun | | fcp_handle_inquiry | | fcp_handle_page83 |
167 173 * |(Called for each Target)| | (Called for each LUN)| |(Called for each LUN) |
168 174 * +------------------------+ +----------------------+ +----------------------+
169 175 * | | |
170 176 * | | |
171 177 * | | |
172 178 * v v |
173 179 * +-----------------+ +-----------------+ |
174 180 * | fcp_send_scsi | | fcp_send_scsi | |
175 181 * | | | | |
176 182 * | INQUIRY | | INQUIRY PAGE83 | |
177 183 * | (To each LUN) | +-----------------+ |
178 184 * +-----------------+ |
179 185 * |
180 186 * v
181 187 * +------------------------+
182 188 * | fcp_call_finish_init |
183 189 * +------------------------+
184 190 * |
185 191 * v
186 192 * +-----------------------------+
187 193 * | fcp_call_finish_init_held |
188 194 * +-----------------------------+
189 195 * |
190 196 * |
191 197 * All LUNs scanned /-\
192 198 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ __ / \
193 199 * | \ /
194 200 * | \-/
195 201 * v |
196 202 * +------------------+ |
197 203 * | fcp_finish_tgt | |
198 204 * +------------------+ |
199 205 * | Target Not Offline and |
200 206 * Target Not Offline and | not marked and tgt_node_state |
201 207 * marked /-\ not FCP_TGT_NODE_ON_DEMAND |
202 208 * _ _ _ _ _ _ / \_ _ _ _ _ _ _ _ |
203 209 * | \ / | |
204 210 * | \-/ | |
205 211 * v v |
206 212 * +----------------------------+ +-------------------+ |
207 213 * | fcp_offline_target | | fcp_create_luns | |
208 214 * | | +-------------------+ |
209 215 * | A structure fcp_tgt_elem | | |
210 216 * | is created and queued in | v |
211 217 * | the FCP port list | +-------------------+ |
212 218 * | port_offline_tgts. It | | fcp_pass_to_hp | |
213 219 * | will be unqueued by the | | | |
214 220 * | watchdog timer. | | Called for each | |
215 221 * +----------------------------+ | LUN. Dispatches | |
216 222 * | | fcp_hp_task | |
217 223 * | +-------------------+ |
218 224 * | | |
219 225 * | | |
220 226 * | | |
221 227 * | +---------------->|
222 228 * | |
223 229 * +---------------------------------------------->|
224 230 * |
225 231 * |
226 232 * All the targets (devices) have been scanned /-\
227 233 * _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ _ / \
228 234 * | \ /
229 235 * | \-/
230 236 * +-------------------------------------+ |
231 237 * | fcp_finish_init | |
232 238 * | | |
233 239 * | Signal broadcasts the condition | |
234 240 * | variable port_config_cv of the FCP | |
235 241 * | port. One potential code sequence | |
236 242 * | waiting on the condition variable | |
237 243 * | the code sequence handling | |
238 244 * | BUS_CONFIG_ALL and BUS_CONFIG_DRIVER| |
239 245 * | The other is in the function | |
240 246 * | fcp_reconfig_wait which is called | |
241 247 * | in the transmit path preventing IOs | |
242 248 * | from going through till the disco- | |
243 249 * | very process is over. | |
244 250 * +-------------------------------------+ |
245 251 * | |
246 252 * | |
247 253 * +--------------------------------->|
248 254 * |
249 255 * v
250 256 * Return
251 257 *
252 258 * ............................................................................
253 259 *
254 260 * STEP 4: The hot plug task is called (for each fcp_hp_elem).
255 261 *
256 262 *
257 263 * +-------------------------+
258 264 * | fcp_hp_task |
259 265 * +-------------------------+
260 266 * |
261 267 * |
262 268 * v
263 269 * +-------------------------+
264 270 * | fcp_trigger_lun |
265 271 * +-------------------------+
266 272 * |
267 273 * |
268 274 * v
269 275 * Bring offline /-\ Bring online
270 276 * _ _ _ _ _ _ _ _ _/ \_ _ _ _ _ _ _ _ _ _
271 277 * | \ / |
272 278 * | \-/ |
273 279 * v v
274 280 * +---------------------+ +-----------------------+
275 281 * | fcp_offline_child | | fcp_get_cip |
276 282 * +---------------------+ | |
277 283 * | Creates a dev_info_t |
278 284 * | or a mdi_pathinfo_t |
279 285 * | depending on whether |
280 286 * | mpxio is on or off. |
281 287 * +-----------------------+
282 288 * |
283 289 * |
284 290 * v
285 291 * +-----------------------+
286 292 * | fcp_online_child |
287 293 * | |
288 294 * | Set device online |
289 295 * | using NDI or MDI. |
290 296 * +-----------------------+
291 297 *
292 298 * ............................................................................
293 299 *
 294 300 * STEP 5: The watchdog timer expires. The watchdog timer does much more than
295 301 * what is described here. We only show the target offline path.
296 302 *
297 303 *
298 304 * +--------------------------+
299 305 * | fcp_watch |
300 306 * +--------------------------+
301 307 * |
302 308 * |
303 309 * v
304 310 * +--------------------------+
305 311 * | fcp_scan_offline_tgts |
306 312 * +--------------------------+
307 313 * |
308 314 * |
309 315 * v
310 316 * +--------------------------+
311 317 * | fcp_offline_target_now |
312 318 * +--------------------------+
313 319 * |
314 320 * |
315 321 * v
316 322 * +--------------------------+
317 323 * | fcp_offline_tgt_luns |
318 324 * +--------------------------+
319 325 * |
320 326 * |
321 327 * v
322 328 * +--------------------------+
323 329 * | fcp_offline_lun |
324 330 * +--------------------------+
325 331 * |
326 332 * |
327 333 * v
328 334 * +----------------------------------+
329 335 * | fcp_offline_lun_now |
330 336 * | |
331 337 * | A request (or two if mpxio) is |
332 338 * | sent to the hot plug task using |
333 339 * | a fcp_hp_elem structure. |
334 340 * +----------------------------------+
335 341 */
336 342
337 343 /*
338 344 * Functions registered with DDI framework
339 345 */
340 346 static int fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd);
341 347 static int fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd);
342 348 static int fcp_open(dev_t *devp, int flag, int otype, cred_t *credp);
343 349 static int fcp_close(dev_t dev, int flag, int otype, cred_t *credp);
344 350 static int fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
345 351 cred_t *credp, int *rval);
346 352
347 353 /*
348 354 * Functions registered with FC Transport framework
349 355 */
350 356 static int fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
351 357 fc_attach_cmd_t cmd, uint32_t s_id);
352 358 static int fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
353 359 fc_detach_cmd_t cmd);
354 360 static int fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev,
355 361 int cmd, intptr_t data, int mode, cred_t *credp, int *rval,
356 362 uint32_t claimed);
357 363 static int fcp_els_callback(opaque_t ulph, opaque_t port_handle,
358 364 fc_unsol_buf_t *buf, uint32_t claimed);
359 365 static int fcp_data_callback(opaque_t ulph, opaque_t port_handle,
360 366 fc_unsol_buf_t *buf, uint32_t claimed);
361 367 static void fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
362 368 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
363 369 uint32_t dev_cnt, uint32_t port_sid);
364 370
365 371 /*
366 372 * Functions registered with SCSA framework
367 373 */
368 374 static int fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
369 375 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
370 376 static int fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
371 377 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
372 378 static void fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
373 379 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
374 380 static int fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt);
375 381 static int fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt);
376 382 static int fcp_scsi_reset(struct scsi_address *ap, int level);
377 383 static int fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom);
378 384 static int fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value,
379 385 int whom);
380 386 static void fcp_pkt_teardown(struct scsi_pkt *pkt);
381 387 static int fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
382 388 void (*callback)(caddr_t), caddr_t arg);
383 389 static int fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip,
384 390 char *name, ddi_eventcookie_t *event_cookiep);
385 391 static int fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
386 392 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
387 393 ddi_callback_id_t *cb_id);
388 394 static int fcp_scsi_bus_remove_eventcall(dev_info_t *devi,
389 395 ddi_callback_id_t cb_id);
390 396 static int fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
391 397 ddi_eventcookie_t eventid, void *impldata);
392 398 static int fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
393 399 ddi_bus_config_op_t op, void *arg, dev_info_t **childp);
394 400 static int fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
395 401 ddi_bus_config_op_t op, void *arg);
396 402
397 403 /*
398 404 * Internal functions
399 405 */
400 406 static int fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data,
401 407 int mode, int *rval);
402 408
403 409 static int fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
404 410 int mode, int *rval);
405 411 static int fcp_copyin_scsi_cmd(caddr_t base_addr,
406 412 struct fcp_scsi_cmd *fscsi, int mode);
407 413 static int fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi,
408 414 caddr_t base_addr, int mode);
409 415 static int fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi);
410 416
411 417 static struct fcp_tgt *fcp_port_create_tgt(struct fcp_port *pptr,
412 418 la_wwn_t *pwwn, int *ret_val, int *fc_status, int *fc_pkt_state,
413 419 int *fc_pkt_reason, int *fc_pkt_action);
414 420 static int fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status,
415 421 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
416 422 static int fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status,
417 423 int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action);
418 424 static void fcp_ipkt_sema_init(struct fcp_ipkt *icmd);
419 425 static int fcp_ipkt_sema_wait(struct fcp_ipkt *icmd);
420 426 static void fcp_ipkt_sema_callback(struct fc_packet *fpkt);
421 427 static void fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd);
422 428
423 429 static void fcp_handle_devices(struct fcp_port *pptr,
424 430 fc_portmap_t devlist[], uint32_t dev_cnt, int link_cnt,
425 431 fcp_map_tag_t *map_tag, int cause);
426 432 static int fcp_handle_mapflags(struct fcp_port *pptr,
427 433 struct fcp_tgt *ptgt, fc_portmap_t *map_entry, int link_cnt,
428 434 int tgt_cnt, int cause);
429 435 static int fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause);
430 436 static int fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
431 437 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause);
432 438 static void fcp_update_state(struct fcp_port *pptr, uint32_t state,
433 439 int cause);
434 440 static void fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag,
435 441 uint32_t state);
436 442 static struct fcp_port *fcp_get_port(opaque_t port_handle);
437 443 static void fcp_unsol_callback(fc_packet_t *fpkt);
438 444 static void fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
439 445 uchar_t r_ctl, uchar_t type);
440 446 static int fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf);
441 447 static struct fcp_ipkt *fcp_icmd_alloc(struct fcp_port *pptr,
442 448 struct fcp_tgt *ptgt, int cmd_len, int resp_len, int data_len,
443 449 int nodma, int lcount, int tcount, int cause, uint32_t rscn_count);
444 450 static void fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd);
445 451 static int fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
446 452 int nodma, int flags);
447 453 static void fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd);
448 454 static struct fcp_tgt *fcp_lookup_target(struct fcp_port *pptr,
449 455 uchar_t *wwn);
450 456 static struct fcp_tgt *fcp_get_target_by_did(struct fcp_port *pptr,
451 457 uint32_t d_id);
452 458 static void fcp_icmd_callback(fc_packet_t *fpkt);
453 459 static int fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode,
454 460 int len, int lcount, int tcount, int cause, uint32_t rscn_count);
455 461 static int fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt);
456 462 static void fcp_scsi_callback(fc_packet_t *fpkt);
457 463 static void fcp_retry_scsi_cmd(fc_packet_t *fpkt);
458 464 static void fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
459 465 static void fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd);
460 466 static struct fcp_lun *fcp_get_lun(struct fcp_tgt *ptgt,
461 467 uint16_t lun_num);
462 468 static int fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
463 469 int link_cnt, int tgt_cnt, int cause);
464 470 static void fcp_finish_init(struct fcp_port *pptr);
465 471 static void fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt,
466 472 int tgt_cnt, int cause);
467 473 static int fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip,
468 474 int old_mpxio, int online, int link_cnt, int tgt_cnt, int flags);
469 475 static int fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
470 476 int link_cnt, int tgt_cnt, int nowait, int flags);
471 477 static void fcp_offline_target_now(struct fcp_port *pptr,
472 478 struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int flags);
473 479 static void fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt,
474 480 int tgt_cnt, int flags);
475 481 static void fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
476 482 int nowait, int flags);
477 483 static void fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt,
478 484 int tgt_cnt);
479 485 static void fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt,
480 486 int tgt_cnt, int flags);
481 487 static void fcp_scan_offline_luns(struct fcp_port *pptr);
482 488 static void fcp_scan_offline_tgts(struct fcp_port *pptr);
483 489 static void fcp_update_offline_flags(struct fcp_lun *plun);
484 490 static struct fcp_pkt *fcp_scan_commands(struct fcp_lun *plun);
485 491 static void fcp_abort_commands(struct fcp_pkt *head, struct
486 492 fcp_port *pptr);
487 493 static void fcp_cmd_callback(fc_packet_t *fpkt);
488 494 static void fcp_complete_pkt(fc_packet_t *fpkt);
489 495 static int fcp_validate_fcp_response(struct fcp_rsp *rsp,
490 496 struct fcp_port *pptr);
491 497 static int fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
492 498 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause);
493 499 static struct fcp_lun *fcp_alloc_lun(struct fcp_tgt *ptgt);
494 500 static void fcp_dealloc_lun(struct fcp_lun *plun);
495 501 static struct fcp_tgt *fcp_alloc_tgt(struct fcp_port *pptr,
496 502 fc_portmap_t *map_entry, int link_cnt);
497 503 static void fcp_dealloc_tgt(struct fcp_tgt *ptgt);
498 504 static void fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt);
499 505 static int fcp_transport(opaque_t port_handle, fc_packet_t *fpkt,
500 506 int internal);
501 507 static void fcp_log(int level, dev_info_t *dip, const char *fmt, ...);
502 508 static int fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
503 509 uint32_t s_id, int instance);
504 510 static int fcp_handle_port_detach(struct fcp_port *pptr, int flag,
505 511 int instance);
506 512 static void fcp_cleanup_port(struct fcp_port *pptr, int instance);
507 513 static int fcp_kmem_cache_constructor(struct scsi_pkt *, scsi_hba_tran_t *,
508 514 int);
509 515 static void fcp_kmem_cache_destructor(struct scsi_pkt *, scsi_hba_tran_t *);
510 516 static int fcp_pkt_setup(struct scsi_pkt *, int (*)(), caddr_t);
511 517 static int fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt,
512 518 int flags);
513 519 static void fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt);
514 520 static int fcp_reset_target(struct scsi_address *ap, int level);
515 521 static int fcp_commoncap(struct scsi_address *ap, char *cap,
516 522 int val, int tgtonly, int doset);
517 523 static int fcp_scsi_get_name(struct scsi_device *sd, char *name, int len);
518 524 static int fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len);
519 525 static int fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap,
520 526 int sleep);
521 527 static int fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
522 528 uint32_t s_id, fc_attach_cmd_t cmd, int instance);
523 529 static void fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo);
524 530 static void fcp_process_elem(struct fcp_hp_elem *elem, int result);
525 531 static child_info_t *fcp_get_cip(struct fcp_lun *plun, child_info_t *cip,
526 532 int lcount, int tcount);
527 533 static int fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip);
528 534 static int fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip);
529 535 static dev_info_t *fcp_create_dip(struct fcp_lun *plun, int link_cnt,
530 536 int tgt_cnt);
531 537 static dev_info_t *fcp_find_existing_dip(struct fcp_lun *plun,
532 538 dev_info_t *pdip, caddr_t name);
533 539 static int fcp_online_child(struct fcp_lun *plun, child_info_t *cip,
534 540 int lcount, int tcount, int flags, int *circ);
535 541 static int fcp_offline_child(struct fcp_lun *plun, child_info_t *cip,
536 542 int lcount, int tcount, int flags, int *circ);
537 543 static void fcp_remove_child(struct fcp_lun *plun);
538 544 static void fcp_watch(void *arg);
539 545 static void fcp_check_reset_delay(struct fcp_port *pptr);
540 546 static void fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
541 547 struct fcp_lun *rlun, int tgt_cnt);
542 548 struct fcp_port *fcp_soft_state_unlink(struct fcp_port *pptr);
543 549 static struct fcp_lun *fcp_lookup_lun(struct fcp_port *pptr,
544 550 uchar_t *wwn, uint16_t lun);
545 551 static void fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
546 552 struct fcp_lun *plun);
547 553 static void fcp_post_callback(struct fcp_pkt *cmd);
548 554 static int fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd);
549 555 static struct fcp_port *fcp_dip2port(dev_info_t *dip);
550 556 struct fcp_lun *fcp_get_lun_from_cip(struct fcp_port *pptr,
551 557 child_info_t *cip);
552 558 static int fcp_pass_to_hp_and_wait(struct fcp_port *pptr,
553 559 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
554 560 int tgt_cnt, int flags);
555 561 static struct fcp_hp_elem *fcp_pass_to_hp(struct fcp_port *pptr,
556 562 struct fcp_lun *plun, child_info_t *cip, int what, int link_cnt,
557 563 int tgt_cnt, int flags, int wait);
558 564 static void fcp_retransport_cmd(struct fcp_port *pptr,
559 565 struct fcp_pkt *cmd);
560 566 static void fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason,
561 567 uint_t statistics);
562 568 static void fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd);
563 569 static void fcp_update_targets(struct fcp_port *pptr,
564 570 fc_portmap_t *dev_list, uint32_t count, uint32_t state, int cause);
565 571 static int fcp_call_finish_init(struct fcp_port *pptr,
566 572 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
567 573 static int fcp_call_finish_init_held(struct fcp_port *pptr,
568 574 struct fcp_tgt *ptgt, int lcount, int tcount, int cause);
569 575 static void fcp_reconfigure_luns(void * tgt_handle);
570 576 static void fcp_free_targets(struct fcp_port *pptr);
571 577 static void fcp_free_target(struct fcp_tgt *ptgt);
572 578 static int fcp_is_retryable(struct fcp_ipkt *icmd);
573 579 static int fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn);
574 580 static void fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int);
575 581 static void fcp_wwn_to_ascii(uchar_t bytes[], char *string);
576 582 static void fcp_print_error(fc_packet_t *fpkt);
577 583 static int fcp_handle_ipkt_errors(struct fcp_port *pptr,
578 584 struct fcp_tgt *ptgt, struct fcp_ipkt *icmd, int rval, caddr_t op);
579 585 static int fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt);
580 586 static fc_portmap_t *fcp_construct_map(struct fcp_port *pptr,
581 587 uint32_t *dev_cnt);
582 588 static void fcp_offline_all(struct fcp_port *pptr, int lcount, int cause);
583 589 static int fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval);
584 590 static int fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *, int, int *,
585 591 struct fcp_ioctl *, struct fcp_port **);
586 592 static char *fcp_get_lun_path(struct fcp_lun *plun);
587 593 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
588 594 int *rval);
589 595 static int fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id);
590 596 static void fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id);
591 597 static char *fcp_get_lun_path(struct fcp_lun *plun);
592 598 static int fcp_get_target_mappings(struct fcp_ioctl *data, int mode,
593 599 int *rval);
594 600 static void fcp_reconfig_wait(struct fcp_port *pptr);
595 601
596 602 /*
597 603 * New functions added for mpxio support
598 604 */
599 605 static int fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
600 606 scsi_hba_tran_t *hba_tran, struct scsi_device *sd);
601 607 static mdi_pathinfo_t *fcp_create_pip(struct fcp_lun *plun, int lcount,
602 608 int tcount);
603 609 static mdi_pathinfo_t *fcp_find_existing_pip(struct fcp_lun *plun,
604 610 dev_info_t *pdip);
605 611 static int fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip);
606 612 static void fcp_handle_page83(fc_packet_t *, struct fcp_ipkt *, int);
607 613 static void fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr);
608 614 static int fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp);
609 615 static int fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip,
610 616 int what);
611 617 static int fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
612 618 fc_packet_t *fpkt);
613 619 static int fcp_symmetric_device_probe(struct fcp_lun *plun);
614 620
615 621 /*
616 622 * New functions added for lun masking support
617 623 */
618 624 static void fcp_read_blacklist(dev_info_t *dip,
619 625 struct fcp_black_list_entry **pplun_blacklist);
620 626 static void fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
621 627 struct fcp_black_list_entry **pplun_blacklist);
622 628 static void fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
623 629 struct fcp_black_list_entry **pplun_blacklist);
624 630 static int fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id);
625 631 static void fcp_cleanup_blacklist(struct fcp_black_list_entry **lun_blacklist);
626 632
627 633 /*
628 634 * New functions to support software FCA (like fcoei)
629 635 */
630 636 static struct scsi_pkt *fcp_pseudo_init_pkt(
631 637 struct scsi_address *ap, struct scsi_pkt *pkt,
632 638 struct buf *bp, int cmdlen, int statuslen,
633 639 int tgtlen, int flags, int (*callback)(), caddr_t arg);
634 640 static void fcp_pseudo_destroy_pkt(
635 641 struct scsi_address *ap, struct scsi_pkt *pkt);
636 642 static void fcp_pseudo_sync_pkt(
637 643 struct scsi_address *ap, struct scsi_pkt *pkt);
638 644 static int fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt);
639 645 static void fcp_pseudo_dmafree(
640 646 struct scsi_address *ap, struct scsi_pkt *pkt);
641 647
642 648 extern struct mod_ops mod_driverops;
643 649 /*
644 650 * This variable is defined in modctl.c and set to '1' after the root driver
645 651 * and fs are loaded. It serves as an indication that the root filesystem can
646 652 * be used.
647 653 */
648 654 extern int modrootloaded;
649 655 /*
650 656 * This table contains strings associated with the SCSI sense key codes. It
651 657 * is used by FCP to print a clear explanation of the code returned in the
652 658 * sense information by a device.
653 659 */
654 660 extern char *sense_keys[];
655 661 /*
656 662 * This device is created by the SCSI pseudo nexus driver (SCSI vHCI). It is
657 663 * under this device that the paths to a physical device are created when
658 664 * MPxIO is used.
659 665 */
660 666 extern dev_info_t *scsi_vhci_dip;
661 667
662 668 /*
663 669 * Report lun processing
664 670 */
665 671 #define FCP_LUN_ADDRESSING 0x80
666 672 #define FCP_PD_ADDRESSING 0x00
667 673 #define FCP_VOLUME_ADDRESSING 0x40
668 674
669 675 #define FCP_SVE_THROTTLE 0x28 /* Vicom */
670 676 #define MAX_INT_DMA 0x7fffffff
671 677 /*
672 678 * Property definitions
673 679 */
674 680 #define NODE_WWN_PROP (char *)fcp_node_wwn_prop
675 681 #define PORT_WWN_PROP (char *)fcp_port_wwn_prop
676 682 #define TARGET_PROP (char *)fcp_target_prop
677 683 #define LUN_PROP (char *)fcp_lun_prop
678 684 #define SAM_LUN_PROP (char *)fcp_sam_lun_prop
679 685 #define CONF_WWN_PROP (char *)fcp_conf_wwn_prop
680 686 #define OBP_BOOT_WWN (char *)fcp_obp_boot_wwn
681 687 #define MANUAL_CFG_ONLY (char *)fcp_manual_config_only
682 688 #define INIT_PORT_PROP (char *)fcp_init_port_prop
683 689 #define TGT_PORT_PROP (char *)fcp_tgt_port_prop
684 690 #define LUN_BLACKLIST_PROP (char *)fcp_lun_blacklist_prop
685 691 /*
686 692 * Short hand macros.
687 693 */
688 694 #define LUN_PORT (plun->lun_tgt->tgt_port)
689 695 #define LUN_TGT (plun->lun_tgt)
690 696
691 697 /*
692 698 * Driver private macros
693 699 */
694 700 #define FCP_ATOB(x) (((x) >= '0' && (x) <= '9') ? ((x) - '0') : \
695 701 ((x) >= 'a' && (x) <= 'f') ? \
696 702 ((x) - 'a' + 10) : ((x) - 'A' + 10))
697 703
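As a side note (not part of this change), FCP_ATOB() converts a single ASCII hex digit to its 4-bit value; two converted characters make one byte when a textual WWN is turned into its raw form, as in the hypothetical helper below (name and signature invented for illustration):

	/*
	 * Hypothetical sketch: convert a 16-character ASCII WWN such as
	 * "508002000011f884" into its 8 raw bytes using FCP_ATOB().
	 */
	static void
	example_wwn_text_to_bytes(const char *str, uchar_t bytes[8])
	{
		int i;

		for (i = 0; i < 8; i++) {
			bytes[i] = (FCP_ATOB(str[2 * i]) << 4) |
			    FCP_ATOB(str[2 * i + 1]);
		}
	}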
698 704 #define FCP_MAX(a, b) ((a) > (b) ? (a) : (b))
699 705
700 706 #define FCP_N_NDI_EVENTS \
701 707 (sizeof (fcp_ndi_event_defs) / sizeof (ndi_event_definition_t))
702 708
703 709 #define FCP_LINK_STATE_CHANGED(p, c) \
704 710 ((p)->port_link_cnt != (c)->ipkt_link_cnt)
705 711
706 712 #define FCP_TGT_STATE_CHANGED(t, c) \
707 713 ((t)->tgt_change_cnt != (c)->ipkt_change_cnt)
708 714
709 715 #define FCP_STATE_CHANGED(p, t, c) \
710 716 (FCP_TGT_STATE_CHANGED(t, c))
711 717
712 718 #define FCP_MUST_RETRY(fpkt) \
713 719 ((fpkt)->pkt_state == FC_PKT_LOCAL_BSY || \
714 720 (fpkt)->pkt_state == FC_PKT_LOCAL_RJT || \
715 721 (fpkt)->pkt_state == FC_PKT_TRAN_BSY || \
716 722 (fpkt)->pkt_state == FC_PKT_ELS_IN_PROGRESS || \
717 723 (fpkt)->pkt_state == FC_PKT_NPORT_BSY || \
718 724 (fpkt)->pkt_state == FC_PKT_FABRIC_BSY || \
719 725 (fpkt)->pkt_state == FC_PKT_PORT_OFFLINE || \
720 726 (fpkt)->pkt_reason == FC_REASON_OFFLINE)
721 727
722 728 #define FCP_SENSE_REPORTLUN_CHANGED(es) \
723 729 ((es)->es_key == KEY_UNIT_ATTENTION && \
724 730 (es)->es_add_code == 0x3f && \
725 731 (es)->es_qual_code == 0x0e)
726 732
727 733 #define FCP_SENSE_NO_LUN(es) \
728 734 ((es)->es_key == KEY_ILLEGAL_REQUEST && \
729 735 (es)->es_add_code == 0x25 && \
730 736 (es)->es_qual_code == 0x0)
731 737
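As an aside, the two sense macros above encode well-known SPC additional sense codes: 3Fh/0Eh is REPORTED LUNS DATA HAS CHANGED and 25h/00h is LOGICAL UNIT NOT SUPPORTED. A hedged, self-contained classification sketch (helper name invented, not driver code):

	/* Illustrative only: classify decoded extended sense with the macros. */
	static int
	example_classify_sense(struct scsi_extended_sense *es)
	{
		if (FCP_SENSE_REPORTLUN_CHANGED(es)) {
			/* UNIT ATTENTION, 3Fh/0Eh: re-issue REPORT LUNS */
			return (1);
		}
		if (FCP_SENSE_NO_LUN(es)) {
			/* ILLEGAL REQUEST, 25h/00h: LUN should be offlined */
			return (2);
		}
		return (0);
	}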
732 738 #define FCP_VERSION "20091208-1.192"
733 739 #define FCP_NAME_VERSION "SunFC FCP v" FCP_VERSION
734 740
735 741 #define FCP_NUM_ELEMENTS(array) \
736 742 (sizeof (array) / sizeof ((array)[0]))
737 743
738 744 /*
739 745 * Debugging, Error reporting, and tracing
740 746 */
741 747 #define FCP_LOG_SIZE 1024 * 1024
742 748
743 749 #define FCP_LEVEL_1 0x00001 /* attach/detach PM CPR */
744 750 #define FCP_LEVEL_2 0x00002 /* failures/Invalid data */
745 751 #define FCP_LEVEL_3 0x00004 /* state change, discovery */
746 752 #define FCP_LEVEL_4 0x00008 /* ULP messages */
747 753 #define FCP_LEVEL_5 0x00010 /* ELS/SCSI cmds */
748 754 #define FCP_LEVEL_6 0x00020 /* Transport failures */
749 755 #define FCP_LEVEL_7 0x00040
750 756 #define FCP_LEVEL_8 0x00080 /* I/O tracing */
751 757 #define FCP_LEVEL_9 0x00100 /* I/O tracing */
752 758
753 759
754 760
755 761 /*
756 762 * Log contents to system messages file
757 763 */
758 764 #define FCP_MSG_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_MSG)
759 765 #define FCP_MSG_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_MSG)
760 766 #define FCP_MSG_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_MSG)
761 767 #define FCP_MSG_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_MSG)
762 768 #define FCP_MSG_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_MSG)
763 769 #define FCP_MSG_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_MSG)
764 770 #define FCP_MSG_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_MSG)
765 771 #define FCP_MSG_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_MSG)
766 772 #define FCP_MSG_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_MSG)
767 773
768 774
769 775 /*
770 776 * Log contents to trace buffer
771 777 */
772 778 #define FCP_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF)
773 779 #define FCP_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF)
774 780 #define FCP_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF)
775 781 #define FCP_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF)
776 782 #define FCP_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF)
777 783 #define FCP_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF)
778 784 #define FCP_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF)
779 785 #define FCP_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF)
780 786 #define FCP_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF)
781 787
782 788
783 789 /*
784 790 * Log contents to both system messages file and trace buffer
785 791 */
786 792 #define FCP_MSG_BUF_LEVEL_1 (FCP_LEVEL_1 | FC_TRACE_LOG_BUF | \
787 793 FC_TRACE_LOG_MSG)
788 794 #define FCP_MSG_BUF_LEVEL_2 (FCP_LEVEL_2 | FC_TRACE_LOG_BUF | \
789 795 FC_TRACE_LOG_MSG)
790 796 #define FCP_MSG_BUF_LEVEL_3 (FCP_LEVEL_3 | FC_TRACE_LOG_BUF | \
791 797 FC_TRACE_LOG_MSG)
792 798 #define FCP_MSG_BUF_LEVEL_4 (FCP_LEVEL_4 | FC_TRACE_LOG_BUF | \
793 799 FC_TRACE_LOG_MSG)
794 800 #define FCP_MSG_BUF_LEVEL_5 (FCP_LEVEL_5 | FC_TRACE_LOG_BUF | \
795 801 FC_TRACE_LOG_MSG)
796 802 #define FCP_MSG_BUF_LEVEL_6 (FCP_LEVEL_6 | FC_TRACE_LOG_BUF | \
797 803 FC_TRACE_LOG_MSG)
798 804 #define FCP_MSG_BUF_LEVEL_7 (FCP_LEVEL_7 | FC_TRACE_LOG_BUF | \
799 805 FC_TRACE_LOG_MSG)
800 806 #define FCP_MSG_BUF_LEVEL_8 (FCP_LEVEL_8 | FC_TRACE_LOG_BUF | \
801 807 FC_TRACE_LOG_MSG)
802 808 #define FCP_MSG_BUF_LEVEL_9 (FCP_LEVEL_9 | FC_TRACE_LOG_BUF | \
803 809 FC_TRACE_LOG_MSG)
804 810 #ifdef DEBUG
805 811 #define FCP_DTRACE fc_trace_debug
806 812 #else
807 813 #define FCP_DTRACE
808 814 #endif
809 815
810 816 #define FCP_TRACE fc_trace_debug
811 817
812 818 static struct cb_ops fcp_cb_ops = {
813 819 fcp_open, /* open */
814 820 fcp_close, /* close */
815 821 nodev, /* strategy */
816 822 nodev, /* print */
817 823 nodev, /* dump */
818 824 nodev, /* read */
819 825 nodev, /* write */
820 826 fcp_ioctl, /* ioctl */
821 827 nodev, /* devmap */
822 828 nodev, /* mmap */
823 829 nodev, /* segmap */
824 830 nochpoll, /* chpoll */
825 831 ddi_prop_op, /* cb_prop_op */
826 832 0, /* streamtab */
827 833 D_NEW | D_MP | D_HOTPLUG, /* cb_flag */
828 834 CB_REV, /* rev */
829 835 nodev, /* aread */
830 836 nodev /* awrite */
831 837 };
832 838
833 839
834 840 static struct dev_ops fcp_ops = {
835 841 DEVO_REV,
836 842 0,
837 843 ddi_getinfo_1to1,
838 844 nulldev, /* identify */
839 845 nulldev, /* probe */
840 846 fcp_attach, /* attach and detach are mandatory */
841 847 fcp_detach,
842 848 nodev, /* reset */
843 849 &fcp_cb_ops, /* cb_ops */
844 850 NULL, /* bus_ops */
845 851 NULL, /* power */
846 852 };
847 853
848 854
849 855 char *fcp_version = FCP_NAME_VERSION;
850 856
851 857 static struct modldrv modldrv = {
852 858 &mod_driverops,
853 859 FCP_NAME_VERSION,
854 860 &fcp_ops
855 861 };
856 862
857 863
858 864 static struct modlinkage modlinkage = {
859 865 MODREV_1,
860 866 &modldrv,
861 867 NULL
862 868 };
863 869
864 870
865 871 static fc_ulp_modinfo_t fcp_modinfo = {
866 872 &fcp_modinfo, /* ulp_handle */
867 873 FCTL_ULP_MODREV_4, /* ulp_rev */
868 874 FC4_SCSI_FCP, /* ulp_type */
869 875 "fcp", /* ulp_name */
870 876 FCP_STATEC_MASK, /* ulp_statec_mask */
871 877 fcp_port_attach, /* ulp_port_attach */
872 878 fcp_port_detach, /* ulp_port_detach */
873 879 fcp_port_ioctl, /* ulp_port_ioctl */
874 880 fcp_els_callback, /* ulp_els_callback */
875 881 fcp_data_callback, /* ulp_data_callback */
876 882 fcp_statec_callback /* ulp_statec_callback */
877 883 };
878 884
879 885 #ifdef DEBUG
880 886 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
881 887 FCP_LEVEL_2 | FCP_LEVEL_3 | \
882 888 FCP_LEVEL_4 | FCP_LEVEL_5 | \
883 889 FCP_LEVEL_6 | FCP_LEVEL_7)
884 890 #else
885 891 #define FCP_TRACE_DEFAULT (FC_TRACE_LOG_MASK | FCP_LEVEL_1 | \
886 892 FCP_LEVEL_2 | FCP_LEVEL_3 | \
887 893 FCP_LEVEL_4 | FCP_LEVEL_5 | \
888 894 FCP_LEVEL_6 | FCP_LEVEL_7)
889 895 #endif
890 896
891 897 /* FCP global variables */
892 898 int fcp_bus_config_debug = 0;
893 899 static int fcp_log_size = FCP_LOG_SIZE;
894 900 static int fcp_trace = FCP_TRACE_DEFAULT;
895 901 static fc_trace_logq_t *fcp_logq = NULL;
896 902 static struct fcp_black_list_entry *fcp_lun_blacklist = NULL;
897 903 /*
898 904 * The auto-configuration is set by default. The only way of disabling it is
899 905 * through the property MANUAL_CFG_ONLY in the fcp.conf file.
900 906 */
901 907 static int fcp_enable_auto_configuration = 1;
902 908 static int fcp_max_bus_config_retries = 4;
903 909 static int fcp_lun_ready_retry = 300;
904 910 /*
905 911 * The value assigned to the following variable has changed several times due
 906 912 * to a problem with data underrun reporting in some firmware. The
907 913 * current value of 50 gives a timeout value of 25 seconds for a max number
908 914 * of 256 LUNs.
909 915 */
910 916 static int fcp_max_target_retries = 50;
911 917 /*
912 918 * Watchdog variables
913 919 * ------------------
914 920 *
915 921 * fcp_watchdog_init
916 922 *
917 923 * Indicates if the watchdog timer is running or not. This is actually
918 924 * a counter of the number of Fibre Channel ports that attached. When
919 925 * the first port attaches the watchdog is started. When the last port
920 926 * detaches the watchdog timer is stopped.
921 927 *
922 928 * fcp_watchdog_time
923 929 *
924 930 * This is the watchdog clock counter. It is incremented by
 925 931 * fcp_watchdog_timeout each time the watchdog timer expires.
926 932 *
927 933 * fcp_watchdog_timeout
928 934 *
929 935 * Increment value of the variable fcp_watchdog_time as well as the
 930 936 * timeout value of the watchdog timer. The unit is 1 second. It
931 937 * is strange that this is not a #define but a variable since the code
932 938 * never changes this value. The reason why it can be said that the
933 939 * unit is 1 second is because the number of ticks for the watchdog
934 940 * timer is determined like this:
935 941 *
936 942 * fcp_watchdog_tick = fcp_watchdog_timeout *
937 943 * drv_usectohz(1000000);
938 944 *
939 945 * The value 1000000 is hard coded in the code.
940 946 *
941 947 * fcp_watchdog_tick
942 948 *
943 949 * Watchdog timer value in ticks.
944 950 */
945 951 static int fcp_watchdog_init = 0;
946 952 static int fcp_watchdog_time = 0;
947 953 static int fcp_watchdog_timeout = 1;
948 954 static int fcp_watchdog_tick;
949 955
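For reference, a simplified sketch (not an exact excerpt of this file) of how these variables cooperate when the first port attaches and the watchdog is armed:

	/* Simplified: arm the watchdog for the first attaching port. */
	mutex_enter(&fcp_global_mutex);
	if (fcp_watchdog_init++ == 0) {
		fcp_watchdog_tick = fcp_watchdog_timeout *
		    drv_usectohz(1000000);
		fcp_watchdog_id = timeout(fcp_watch, NULL, fcp_watchdog_tick);
	}
	mutex_exit(&fcp_global_mutex);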
950 956 /*
951 957 * fcp_offline_delay is a global variable to enable customisation of
952 958 * the timeout on link offlines or RSCNs. The default value is set
953 959 * to match FCP_OFFLINE_DELAY (20sec), which is 2*RA_TOV_els as
954 960 * specified in FCP4 Chapter 11 (see www.t10.org).
955 961 *
956 962 * The variable fcp_offline_delay is specified in SECONDS.
957 963 *
958 964 * If we made this a static var then the user would not be able to
959 965 * change it. This variable is set in fcp_attach().
960 966 */
961 967 unsigned int fcp_offline_delay = FCP_OFFLINE_DELAY;
962 968
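Since the variable is global and non-static, an administrator can also override it at boot; a hedged example of an /etc/system entry (the value 30 is only an illustration within the recommended 10..60 second range):

	* Extend the fcp offline/RSCN grace period to 30 seconds.
	set fcp:fcp_offline_delay = 30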
963 969 static void *fcp_softstate = NULL; /* for soft state */
964 970 static uchar_t fcp_oflag = FCP_IDLE; /* open flag */
965 971 static kmutex_t fcp_global_mutex;
966 972 static kmutex_t fcp_ioctl_mutex;
967 973 static dev_info_t *fcp_global_dip = NULL;
968 974 static timeout_id_t fcp_watchdog_id;
969 975 const char *fcp_lun_prop = "lun";
970 976 const char *fcp_sam_lun_prop = "sam-lun";
971 977 const char *fcp_target_prop = "target";
972 978 /*
973 979 * NOTE: consumers of "node-wwn" property include stmsboot in ON
974 980 * consolidation.
975 981 */
976 982 const char *fcp_node_wwn_prop = "node-wwn";
977 983 const char *fcp_port_wwn_prop = "port-wwn";
978 984 const char *fcp_conf_wwn_prop = "fc-port-wwn";
979 985 const char *fcp_obp_boot_wwn = "fc-boot-dev-portwwn";
980 986 const char *fcp_manual_config_only = "manual_configuration_only";
981 987 const char *fcp_init_port_prop = "initiator-port";
982 988 const char *fcp_tgt_port_prop = "target-port";
983 989 const char *fcp_lun_blacklist_prop = "pwwn-lun-blacklist";
984 990
985 991 static struct fcp_port *fcp_port_head = NULL;
986 992 static ddi_eventcookie_t fcp_insert_eid;
987 993 static ddi_eventcookie_t fcp_remove_eid;
988 994
989 995 static ndi_event_definition_t fcp_ndi_event_defs[] = {
990 996 { FCP_EVENT_TAG_INSERT, FCAL_INSERT_EVENT, EPL_KERNEL },
991 997 { FCP_EVENT_TAG_REMOVE, FCAL_REMOVE_EVENT, EPL_INTERRUPT }
992 998 };
993 999
994 1000 /*
995 1001 * List of valid commands for the scsi_ioctl call
996 1002 */
997 1003 static uint8_t scsi_ioctl_list[] = {
998 1004 SCMD_INQUIRY,
999 1005 SCMD_REPORT_LUN,
1000 1006 SCMD_READ_CAPACITY
1001 1007 };
1002 1008
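A hedged sketch of how such a whitelist is typically consulted before a pass-through CDB is accepted (helper name invented for illustration):

	/* Illustrative only: accept an opcode only if it is whitelisted. */
	static int
	example_ioctl_opcode_allowed(uint8_t opcode)
	{
		size_t i;

		for (i = 0; i < FCP_NUM_ELEMENTS(scsi_ioctl_list); i++) {
			if (scsi_ioctl_list[i] == opcode) {
				return (1);
			}
		}
		return (0);
	}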
1003 1009 /*
1004 1010 * this is used to dummy up a report lun response for cases
1005 1011 * where the target doesn't support it
1006 1012 */
1007 1013 static uchar_t fcp_dummy_lun[] = {
1008 1014 0x00, /* MSB length (length = no of luns * 8) */
1009 1015 0x00,
1010 1016 0x00,
1011 1017 0x08, /* LSB length */
1012 1018 0x00, /* MSB reserved */
1013 1019 0x00,
1014 1020 0x00,
1015 1021 0x00, /* LSB reserved */
1016 1022 FCP_PD_ADDRESSING,
1017 1023 0x00, /* LUN is ZERO at the first level */
1018 1024 0x00,
1019 1025 0x00, /* second level is zero */
1020 1026 0x00,
1021 1027 0x00, /* third level is zero */
1022 1028 0x00,
1023 1029 0x00 /* fourth level is zero */
1024 1030 };
1025 1031
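For context, the first four bytes of a REPORT LUNS response hold the big-endian list length in bytes (number of LUNs * 8), so a consumer of fcp_dummy_lun sees exactly one LUN at address zero. A hedged decoding sketch (helper name invented):

	/* Illustrative only: derive the LUN count from a REPORT LUNS header. */
	static uint32_t
	example_report_lun_count(const uchar_t *rsp)
	{
		uint32_t len;

		len = ((uint32_t)rsp[0] << 24) | ((uint32_t)rsp[1] << 16) |
		    ((uint32_t)rsp[2] << 8) | rsp[3];
		return (len / 8);	/* each LUN entry is 8 bytes */
	}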
1026 1032 static uchar_t fcp_alpa_to_switch[] = {
1027 1033 0x00, 0x7d, 0x7c, 0x00, 0x7b, 0x00, 0x00, 0x00, 0x7a, 0x00,
1028 1034 0x00, 0x00, 0x00, 0x00, 0x00, 0x79, 0x78, 0x00, 0x00, 0x00,
1029 1035 0x00, 0x00, 0x00, 0x77, 0x76, 0x00, 0x00, 0x75, 0x00, 0x74,
1030 1036 0x73, 0x72, 0x00, 0x00, 0x00, 0x71, 0x00, 0x70, 0x6f, 0x6e,
1031 1037 0x00, 0x6d, 0x6c, 0x6b, 0x6a, 0x69, 0x68, 0x00, 0x00, 0x67,
1032 1038 0x66, 0x65, 0x64, 0x63, 0x62, 0x00, 0x00, 0x61, 0x60, 0x00,
1033 1039 0x5f, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x5e, 0x00, 0x5d,
1034 1040 0x5c, 0x5b, 0x00, 0x5a, 0x59, 0x58, 0x57, 0x56, 0x55, 0x00,
1035 1041 0x00, 0x54, 0x53, 0x52, 0x51, 0x50, 0x4f, 0x00, 0x00, 0x4e,
1036 1042 0x4d, 0x00, 0x4c, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x4b,
1037 1043 0x00, 0x4a, 0x49, 0x48, 0x00, 0x47, 0x46, 0x45, 0x44, 0x43,
1038 1044 0x42, 0x00, 0x00, 0x41, 0x40, 0x3f, 0x3e, 0x3d, 0x3c, 0x00,
1039 1045 0x00, 0x3b, 0x3a, 0x00, 0x39, 0x00, 0x00, 0x00, 0x38, 0x37,
1040 1046 0x36, 0x00, 0x35, 0x00, 0x00, 0x00, 0x34, 0x00, 0x00, 0x00,
1041 1047 0x00, 0x00, 0x00, 0x33, 0x32, 0x00, 0x00, 0x00, 0x00, 0x00,
1042 1048 0x00, 0x31, 0x30, 0x00, 0x00, 0x2f, 0x00, 0x2e, 0x2d, 0x2c,
1043 1049 0x00, 0x00, 0x00, 0x2b, 0x00, 0x2a, 0x29, 0x28, 0x00, 0x27,
1044 1050 0x26, 0x25, 0x24, 0x23, 0x22, 0x00, 0x00, 0x21, 0x20, 0x1f,
1045 1051 0x1e, 0x1d, 0x1c, 0x00, 0x00, 0x1b, 0x1a, 0x00, 0x19, 0x00,
1046 1052 0x00, 0x00, 0x00, 0x00, 0x00, 0x18, 0x00, 0x17, 0x16, 0x15,
1047 1053 0x00, 0x14, 0x13, 0x12, 0x11, 0x10, 0x0f, 0x00, 0x00, 0x0e,
1048 1054 0x0d, 0x0c, 0x0b, 0x0a, 0x09, 0x00, 0x00, 0x08, 0x07, 0x00,
1049 1055 0x06, 0x00, 0x00, 0x00, 0x05, 0x04, 0x03, 0x00, 0x02, 0x00,
1050 1056 0x00, 0x00, 0x01, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00, 0x00,
1051 1057 };
1052 1058
1053 1059 static caddr_t pid = "SESS01 ";
1054 1060
1055 1061 #if !defined(lint)
1056 1062
1057 1063 _NOTE(MUTEX_PROTECTS_DATA(fcp_global_mutex,
1058 1064 fcp_port::fcp_next fcp_watchdog_id))
1059 1065
1060 1066 _NOTE(DATA_READABLE_WITHOUT_LOCK(fcp_watchdog_time))
1061 1067
1062 1068 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1063 1069 fcp_insert_eid
1064 1070 fcp_remove_eid
1065 1071 fcp_watchdog_time))
1066 1072
1067 1073 _NOTE(SCHEME_PROTECTS_DATA("Unshared",
1068 1074 fcp_cb_ops
1069 1075 fcp_ops
1070 1076 callb_cpr))
1071 1077
1072 1078 #endif /* lint */
1073 1079
1074 1080 /*
1075 1081 * This table is used to determine whether or not it's safe to copy in
1076 1082 * the target node name for a lun. Since all luns behind the same target
 1077 1083 * have the same wwnn, only targets that do not support multiple luns are
1078 1084 * eligible to be enumerated under mpxio if they aren't page83 compliant.
1079 1085 */
1080 1086
1081 1087 char *fcp_symmetric_disk_table[] = {
1082 1088 "SEAGATE ST",
1083 1089 "IBM DDYFT",
1084 1090 "SUNW SUNWGS", /* Daktari enclosure */
1085 1091 "SUN SENA", /* SES device */
1086 1092 "SUN SESS01" /* VICOM SVE box */
1087 1093 };
1088 1094
1089 1095 int fcp_symmetric_disk_table_size =
1090 1096 sizeof (fcp_symmetric_disk_table)/sizeof (char *);
1091 1097
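A hedged sketch of the intended lookup: each table entry is an 8-byte vendor ID followed by a product-ID prefix, matched against the standard INQUIRY data of the LUN (helper name invented; presumably this is what fcp_symmetric_device_probe() consults):

	/* Illustrative only: prefix-match INQUIRY vendor/product data. */
	static int
	example_is_symmetric(struct scsi_inquiry *inq)
	{
		char vid_pid[25];	/* 8-byte vendor + 16-byte product + NUL */
		int i;

		(void) snprintf(vid_pid, sizeof (vid_pid), "%.8s%.16s",
		    inq->inq_vid, inq->inq_pid);

		for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
			if (strncmp(vid_pid, fcp_symmetric_disk_table[i],
			    strlen(fcp_symmetric_disk_table[i])) == 0) {
				return (1);
			}
		}
		return (0);
	}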
1092 1098 /*
 1093 1099 * This structure is bogus. scsi_hba_attach_setup() requires this
 1094 1100 * information (the kernel will panic if it is not passed in).
 1095 1101 * It remains to be determined what impact, if any, providing this
 1096 1102 * information has on the system. Since dma allocation is done in pkt_init
 1097 1103 * it may not have any impact. These values are straight from the Writing
 1098 1104 * Device Drivers manual.
1099 1105 */
1100 1106 static ddi_dma_attr_t pseudo_fca_dma_attr = {
1101 1107 DMA_ATTR_V0, /* ddi_dma_attr version */
1102 1108 0, /* low address */
1103 1109 0xffffffff, /* high address */
1104 1110 0x00ffffff, /* counter upper bound */
1105 1111 1, /* alignment requirements */
1106 1112 0x3f, /* burst sizes */
1107 1113 1, /* minimum DMA access */
1108 1114 0xffffffff, /* maximum DMA access */
1109 1115 (1 << 24) - 1, /* segment boundary restrictions */
 1110 1116 	1,			/* scatter/gather list length */
1111 1117 512, /* device granularity */
1112 1118 0 /* DMA flags */
1113 1119 };
1114 1120
1115 1121 /*
1116 1122 * The _init(9e) return value should be that of mod_install(9f). Under
 1117 1123 * some circumstances, a failure may not be related to mod_install(9f) and
1118 1124 * one would then require a return value to indicate the failure. Looking
1119 1125 * at mod_install(9f), it is expected to return 0 for success and non-zero
 1120 1126 * for failure. mod_install(9f) for device drivers further goes down the
1121 1127 * calling chain and ends up in ddi_installdrv(), whose return values are
1122 1128 * DDI_SUCCESS and DDI_FAILURE - There are also other functions in the
1123 1129 * calling chain of mod_install(9f) which return values like EINVAL and
 1124 1130 * some even return -1.
1125 1131 *
1126 1132 * To work around the vagaries of the mod_install() calling chain, return
1127 1133 * either 0 or ENODEV depending on the success or failure of mod_install()
1128 1134 */
1129 1135 int
1130 1136 _init(void)
1131 1137 {
1132 1138 int rval;
1133 1139
1134 1140 /*
1135 1141 * Allocate soft state and prepare to do ddi_soft_state_zalloc()
1136 1142 * before registering with the transport first.
1137 1143 */
1138 1144 if (ddi_soft_state_init(&fcp_softstate,
1139 1145 sizeof (struct fcp_port), FCP_INIT_ITEMS) != 0) {
1140 1146 return (EINVAL);
1141 1147 }
1142 1148
1143 1149 mutex_init(&fcp_global_mutex, NULL, MUTEX_DRIVER, NULL);
1144 1150 mutex_init(&fcp_ioctl_mutex, NULL, MUTEX_DRIVER, NULL);
1145 1151
1146 1152 if ((rval = fc_ulp_add(&fcp_modinfo)) != FC_SUCCESS) {
1147 1153 cmn_err(CE_WARN, "fcp: fc_ulp_add failed");
1148 1154 mutex_destroy(&fcp_global_mutex);
1149 1155 mutex_destroy(&fcp_ioctl_mutex);
1150 1156 ddi_soft_state_fini(&fcp_softstate);
1151 1157 return (ENODEV);
1152 1158 }
1153 1159
1154 1160 fcp_logq = fc_trace_alloc_logq(fcp_log_size);
1155 1161
1156 1162 if ((rval = mod_install(&modlinkage)) != 0) {
1157 1163 fc_trace_free_logq(fcp_logq);
1158 1164 (void) fc_ulp_remove(&fcp_modinfo);
1159 1165 mutex_destroy(&fcp_global_mutex);
1160 1166 mutex_destroy(&fcp_ioctl_mutex);
1161 1167 ddi_soft_state_fini(&fcp_softstate);
1162 1168 rval = ENODEV;
1163 1169 }
1164 1170
1165 1171 return (rval);
1166 1172 }
1167 1173
1168 1174
1169 1175 /*
1170 1176 * the system is done with us as a driver, so clean up
1171 1177 */
1172 1178 int
1173 1179 _fini(void)
1174 1180 {
1175 1181 int rval;
1176 1182
1177 1183 /*
1178 1184 * don't start cleaning up until we know that the module remove
1179 1185 * has worked -- if this works, then we know that each instance
1180 1186 * has successfully been DDI_DETACHed
1181 1187 */
1182 1188 if ((rval = mod_remove(&modlinkage)) != 0) {
1183 1189 return (rval);
1184 1190 }
1185 1191
1186 1192 (void) fc_ulp_remove(&fcp_modinfo);
1187 1193
1188 1194 ddi_soft_state_fini(&fcp_softstate);
1189 1195 mutex_destroy(&fcp_global_mutex);
1190 1196 mutex_destroy(&fcp_ioctl_mutex);
1191 1197 fc_trace_free_logq(fcp_logq);
1192 1198
1193 1199 return (rval);
1194 1200 }
1195 1201
1196 1202
1197 1203 int
1198 1204 _info(struct modinfo *modinfop)
1199 1205 {
1200 1206 return (mod_info(&modlinkage, modinfop));
1201 1207 }
1202 1208
1203 1209
1204 1210 /*
1205 1211 * attach the module
1206 1212 */
1207 1213 static int
1208 1214 fcp_attach(dev_info_t *devi, ddi_attach_cmd_t cmd)
1209 1215 {
1210 1216 int rval = DDI_SUCCESS;
1211 1217
1212 1218 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1213 1219 FCP_BUF_LEVEL_8, 0, "fcp module attach: cmd=0x%x", cmd);
1214 1220
1215 1221 if (cmd == DDI_ATTACH) {
1216 1222 /* The FCP pseudo device is created here. */
1217 1223 mutex_enter(&fcp_global_mutex);
1218 1224 fcp_global_dip = devi;
1219 1225 mutex_exit(&fcp_global_mutex);
1220 1226
1221 1227 if (ddi_create_minor_node(fcp_global_dip, "fcp", S_IFCHR,
1222 1228 0, DDI_PSEUDO, 0) == DDI_SUCCESS) {
1223 1229 ddi_report_dev(fcp_global_dip);
1224 1230 } else {
1225 1231 cmn_err(CE_WARN, "FCP: Cannot create minor node");
1226 1232 mutex_enter(&fcp_global_mutex);
1227 1233 fcp_global_dip = NULL;
1228 1234 mutex_exit(&fcp_global_mutex);
1229 1235
1230 1236 rval = DDI_FAILURE;
1231 1237 }
1232 1238 /*
1233 1239 * We check the fcp_offline_delay property at this
1234 1240 * point. This variable is global for the driver,
1235 1241 * not specific to an instance.
1236 1242 *
1237 1243 * We do not recommend setting the value to less
1238 1244 * than 10 seconds (RA_TOV_els), or greater than
1239 1245 * 60 seconds.
1240 1246 */
1241 1247 fcp_offline_delay = ddi_prop_get_int(DDI_DEV_T_ANY,
1242 1248 devi, DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
1243 1249 "fcp_offline_delay", FCP_OFFLINE_DELAY);
1244 1250 if ((fcp_offline_delay < 10) ||
1245 1251 (fcp_offline_delay > 60)) {
1246 1252 cmn_err(CE_WARN, "Setting fcp_offline_delay "
1247 1253 "to %d second(s). This is outside the "
1248 1254 "recommended range of 10..60 seconds.",
1249 1255 fcp_offline_delay);
1250 1256 }
1251 1257 }
1252 1258
1253 1259 return (rval);
1254 1260 }
1255 1261
1256 1262
1257 1263 /*ARGSUSED*/
1258 1264 static int
1259 1265 fcp_detach(dev_info_t *devi, ddi_detach_cmd_t cmd)
1260 1266 {
1261 1267 int res = DDI_SUCCESS;
1262 1268
1263 1269 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1264 1270 FCP_BUF_LEVEL_8, 0, "module detach: cmd=0x%x", cmd);
1265 1271
1266 1272 if (cmd == DDI_DETACH) {
1267 1273 /*
1268 1274 * Check if there are active ports/threads. If there
1269 1275 * are any, we will fail, else we will succeed (there
1270 1276 * should not be much to clean up)
1271 1277 */
1272 1278 mutex_enter(&fcp_global_mutex);
1273 1279 FCP_DTRACE(fcp_logq, "fcp",
1274 1280 fcp_trace, FCP_BUF_LEVEL_8, 0, "port_head=%p",
1275 1281 (void *) fcp_port_head);
1276 1282
1277 1283 if (fcp_port_head == NULL) {
1278 1284 ddi_remove_minor_node(fcp_global_dip, NULL);
1279 1285 fcp_global_dip = NULL;
1280 1286 mutex_exit(&fcp_global_mutex);
1281 1287 } else {
1282 1288 mutex_exit(&fcp_global_mutex);
1283 1289 res = DDI_FAILURE;
1284 1290 }
1285 1291 }
1286 1292 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
1287 1293 FCP_BUF_LEVEL_8, 0, "module detach returning %d", res);
1288 1294
1289 1295 return (res);
1290 1296 }
1291 1297
1292 1298
1293 1299 /* ARGSUSED */
1294 1300 static int
1295 1301 fcp_open(dev_t *devp, int flag, int otype, cred_t *credp)
1296 1302 {
1297 1303 if (otype != OTYP_CHR) {
1298 1304 return (EINVAL);
1299 1305 }
1300 1306
1301 1307 /*
 1302 1308 	 * Allow only root to talk.
1303 1309 */
1304 1310 if (drv_priv(credp)) {
1305 1311 return (EPERM);
1306 1312 }
1307 1313
1308 1314 mutex_enter(&fcp_global_mutex);
1309 1315 if (fcp_oflag & FCP_EXCL) {
1310 1316 mutex_exit(&fcp_global_mutex);
1311 1317 return (EBUSY);
1312 1318 }
1313 1319
1314 1320 if (flag & FEXCL) {
1315 1321 if (fcp_oflag & FCP_OPEN) {
1316 1322 mutex_exit(&fcp_global_mutex);
1317 1323 return (EBUSY);
1318 1324 }
1319 1325 fcp_oflag |= FCP_EXCL;
1320 1326 }
1321 1327 fcp_oflag |= FCP_OPEN;
1322 1328 mutex_exit(&fcp_global_mutex);
1323 1329
1324 1330 return (0);
1325 1331 }
1326 1332
1327 1333
1328 1334 /* ARGSUSED */
1329 1335 static int
1330 1336 fcp_close(dev_t dev, int flag, int otype, cred_t *credp)
1331 1337 {
1332 1338 if (otype != OTYP_CHR) {
1333 1339 return (EINVAL);
1334 1340 }
1335 1341
1336 1342 mutex_enter(&fcp_global_mutex);
1337 1343 if (!(fcp_oflag & FCP_OPEN)) {
1338 1344 mutex_exit(&fcp_global_mutex);
1339 1345 return (ENODEV);
1340 1346 }
1341 1347 fcp_oflag = FCP_IDLE;
1342 1348 mutex_exit(&fcp_global_mutex);
1343 1349
1344 1350 return (0);
1345 1351 }
1346 1352
1347 1353
1348 1354 /*
1349 1355 * fcp_ioctl
1350 1356 * Entry point for the FCP ioctls
1351 1357 *
1352 1358 * Input:
1353 1359 * See ioctl(9E)
1354 1360 *
1355 1361 * Output:
1356 1362 * See ioctl(9E)
1357 1363 *
1358 1364 * Returns:
1359 1365 * See ioctl(9E)
1360 1366 *
1361 1367 * Context:
1362 1368 * Kernel context.
1363 1369 */
1364 1370 /* ARGSUSED */
1365 1371 static int
1366 1372 fcp_ioctl(dev_t dev, int cmd, intptr_t data, int mode, cred_t *credp,
1367 1373 int *rval)
1368 1374 {
1369 1375 int ret = 0;
1370 1376
1371 1377 mutex_enter(&fcp_global_mutex);
1372 1378 if (!(fcp_oflag & FCP_OPEN)) {
1373 1379 mutex_exit(&fcp_global_mutex);
1374 1380 return (ENXIO);
1375 1381 }
1376 1382 mutex_exit(&fcp_global_mutex);
1377 1383
1378 1384 switch (cmd) {
1379 1385 case FCP_TGT_INQUIRY:
1380 1386 case FCP_TGT_CREATE:
1381 1387 case FCP_TGT_DELETE:
1382 1388 ret = fcp_setup_device_data_ioctl(cmd,
1383 1389 (struct fcp_ioctl *)data, mode, rval);
1384 1390 break;
1385 1391
1386 1392 case FCP_TGT_SEND_SCSI:
1387 1393 mutex_enter(&fcp_ioctl_mutex);
1388 1394 ret = fcp_setup_scsi_ioctl(
1389 1395 (struct fcp_scsi_cmd *)data, mode, rval);
1390 1396 mutex_exit(&fcp_ioctl_mutex);
1391 1397 break;
1392 1398
1393 1399 case FCP_STATE_COUNT:
1394 1400 ret = fcp_get_statec_count((struct fcp_ioctl *)data,
1395 1401 mode, rval);
1396 1402 break;
1397 1403 case FCP_GET_TARGET_MAPPINGS:
1398 1404 ret = fcp_get_target_mappings((struct fcp_ioctl *)data,
1399 1405 mode, rval);
1400 1406 break;
1401 1407 default:
1402 1408 fcp_log(CE_WARN, NULL,
1403 1409 "!Invalid ioctl opcode = 0x%x", cmd);
1404 1410 ret = EINVAL;
1405 1411 }
1406 1412
1407 1413 return (ret);
1408 1414 }
1409 1415
1410 1416
1411 1417 /*
1412 1418 * fcp_setup_device_data_ioctl
1413 1419 * Setup handler for the "device data" style of
1414 1420 * ioctl for FCP. See "fcp_util.h" for data structure
1415 1421 * definition.
1416 1422 *
1417 1423 * Input:
1418 1424 * cmd = FCP ioctl command
1419 1425 * data = ioctl data
1420 1426 * mode = See ioctl(9E)
1421 1427 *
1422 1428 * Output:
1423 1429 * data = ioctl data
1424 1430 * rval = return value - see ioctl(9E)
1425 1431 *
1426 1432 * Returns:
1427 1433 * See ioctl(9E)
1428 1434 *
1429 1435 * Context:
1430 1436 * Kernel context.
1431 1437 */
1432 1438 /* ARGSUSED */
1433 1439 static int
1434 1440 fcp_setup_device_data_ioctl(int cmd, struct fcp_ioctl *data, int mode,
1435 1441 int *rval)
1436 1442 {
1437 1443 struct fcp_port *pptr;
1438 1444 struct device_data *dev_data;
1439 1445 uint32_t link_cnt;
1440 1446 la_wwn_t *wwn_ptr = NULL;
1441 1447 struct fcp_tgt *ptgt = NULL;
1442 1448 struct fcp_lun *plun = NULL;
1443 1449 int i, error;
1444 1450 struct fcp_ioctl fioctl;
1445 1451
1446 1452 #ifdef _MULTI_DATAMODEL
1447 1453 switch (ddi_model_convert_from(mode & FMODELS)) {
1448 1454 case DDI_MODEL_ILP32: {
1449 1455 struct fcp32_ioctl f32_ioctl;
1450 1456
1451 1457 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1452 1458 sizeof (struct fcp32_ioctl), mode)) {
1453 1459 return (EFAULT);
1454 1460 }
1455 1461 fioctl.fp_minor = f32_ioctl.fp_minor;
1456 1462 fioctl.listlen = f32_ioctl.listlen;
1457 1463 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1458 1464 break;
1459 1465 }
1460 1466 case DDI_MODEL_NONE:
1461 1467 if (ddi_copyin((void *)data, (void *)&fioctl,
1462 1468 sizeof (struct fcp_ioctl), mode)) {
1463 1469 return (EFAULT);
1464 1470 }
1465 1471 break;
1466 1472 }
1467 1473
1468 1474 #else /* _MULTI_DATAMODEL */
1469 1475 if (ddi_copyin((void *)data, (void *)&fioctl,
1470 1476 sizeof (struct fcp_ioctl), mode)) {
1471 1477 return (EFAULT);
1472 1478 }
1473 1479 #endif /* _MULTI_DATAMODEL */
1474 1480
1475 1481 /*
1476 1482 * Right now we can assume that the minor number matches with
1477 1483 * this instance of fp. If this changes we will need to
1478 1484 * revisit this logic.
1479 1485 */
1480 1486 mutex_enter(&fcp_global_mutex);
1481 1487 pptr = fcp_port_head;
1482 1488 while (pptr) {
1483 1489 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1484 1490 break;
1485 1491 } else {
1486 1492 pptr = pptr->port_next;
1487 1493 }
1488 1494 }
1489 1495 mutex_exit(&fcp_global_mutex);
1490 1496 if (pptr == NULL) {
1491 1497 return (ENXIO);
1492 1498 }
1493 1499 mutex_enter(&pptr->port_mutex);
1494 1500
1495 1501
1496 1502 if ((dev_data = kmem_zalloc((sizeof (struct device_data)) *
1497 1503 fioctl.listlen, KM_NOSLEEP)) == NULL) {
1498 1504 mutex_exit(&pptr->port_mutex);
1499 1505 return (ENOMEM);
1500 1506 }
1501 1507
1502 1508 if (ddi_copyin(fioctl.list, dev_data,
1503 1509 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1504 1510 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1505 1511 mutex_exit(&pptr->port_mutex);
1506 1512 return (EFAULT);
1507 1513 }
1508 1514 link_cnt = pptr->port_link_cnt;
1509 1515
1510 1516 if (cmd == FCP_TGT_INQUIRY) {
1511 1517 wwn_ptr = (la_wwn_t *)&(dev_data[0].dev_pwwn);
1512 1518 if (bcmp(wwn_ptr->raw_wwn, pptr->port_pwwn.raw_wwn,
1513 1519 sizeof (wwn_ptr->raw_wwn)) == 0) {
1514 1520 /* This ioctl is requesting INQ info of local HBA */
1515 1521 mutex_exit(&pptr->port_mutex);
1516 1522 dev_data[0].dev0_type = DTYPE_UNKNOWN;
1517 1523 dev_data[0].dev_status = 0;
1518 1524 if (ddi_copyout(dev_data, fioctl.list,
1519 1525 (sizeof (struct device_data)) * fioctl.listlen,
1520 1526 mode)) {
1521 1527 kmem_free(dev_data,
1522 1528 sizeof (*dev_data) * fioctl.listlen);
1523 1529 return (EFAULT);
1524 1530 }
1525 1531 kmem_free(dev_data,
1526 1532 sizeof (*dev_data) * fioctl.listlen);
1527 1533 #ifdef _MULTI_DATAMODEL
1528 1534 switch (ddi_model_convert_from(mode & FMODELS)) {
1529 1535 case DDI_MODEL_ILP32: {
1530 1536 struct fcp32_ioctl f32_ioctl;
1531 1537 f32_ioctl.fp_minor = fioctl.fp_minor;
1532 1538 f32_ioctl.listlen = fioctl.listlen;
1533 1539 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1534 1540 if (ddi_copyout((void *)&f32_ioctl,
1535 1541 (void *)data,
1536 1542 sizeof (struct fcp32_ioctl), mode)) {
1537 1543 return (EFAULT);
1538 1544 }
1539 1545 break;
1540 1546 }
1541 1547 case DDI_MODEL_NONE:
1542 1548 if (ddi_copyout((void *)&fioctl, (void *)data,
1543 1549 sizeof (struct fcp_ioctl), mode)) {
1544 1550 return (EFAULT);
1545 1551 }
1546 1552 break;
1547 1553 }
1548 1554 #else /* _MULTI_DATAMODEL */
1549 1555 if (ddi_copyout((void *)&fioctl, (void *)data,
1550 1556 sizeof (struct fcp_ioctl), mode)) {
1551 1557 return (EFAULT);
1552 1558 }
1553 1559 #endif /* _MULTI_DATAMODEL */
1554 1560 return (0);
1555 1561 }
1556 1562 }
1557 1563
1558 1564 if (pptr->port_state & (FCP_STATE_INIT | FCP_STATE_OFFLINE)) {
1559 1565 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1560 1566 mutex_exit(&pptr->port_mutex);
1561 1567 return (ENXIO);
1562 1568 }
1563 1569
1564 1570 for (i = 0; (i < fioctl.listlen) && (link_cnt == pptr->port_link_cnt);
1565 1571 i++) {
1566 1572 wwn_ptr = (la_wwn_t *)&(dev_data[i].dev_pwwn);
1567 1573
1568 1574 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1569 1575
1570 1576
1571 1577 dev_data[i].dev_status = ENXIO;
1572 1578
1573 1579 if ((ptgt = fcp_lookup_target(pptr,
1574 1580 (uchar_t *)wwn_ptr)) == NULL) {
1575 1581 mutex_exit(&pptr->port_mutex);
1576 1582 if (fc_ulp_get_remote_port(pptr->port_fp_handle,
1577 1583 wwn_ptr, &error, 0) == NULL) {
1578 1584 dev_data[i].dev_status = ENODEV;
1579 1585 mutex_enter(&pptr->port_mutex);
1580 1586 continue;
1581 1587 } else {
1582 1588
1583 1589 dev_data[i].dev_status = EAGAIN;
1584 1590
1585 1591 mutex_enter(&pptr->port_mutex);
1586 1592 continue;
1587 1593 }
1588 1594 } else {
1589 1595 mutex_enter(&ptgt->tgt_mutex);
1590 1596 if (ptgt->tgt_state & (FCP_TGT_MARK |
1591 1597 FCP_TGT_BUSY)) {
1592 1598 dev_data[i].dev_status = EAGAIN;
1593 1599 mutex_exit(&ptgt->tgt_mutex);
1594 1600 continue;
1595 1601 }
1596 1602
1597 1603 if (ptgt->tgt_state & FCP_TGT_OFFLINE) {
1598 1604 if (ptgt->tgt_icap && !ptgt->tgt_tcap) {
1599 1605 dev_data[i].dev_status = ENOTSUP;
1600 1606 } else {
1601 1607 dev_data[i].dev_status = ENXIO;
1602 1608 }
1603 1609 mutex_exit(&ptgt->tgt_mutex);
1604 1610 continue;
1605 1611 }
1606 1612
1607 1613 switch (cmd) {
1608 1614 case FCP_TGT_INQUIRY:
1609 1615 /*
1610 1616				 * The reason we report the device type
1611 1617				 * of lun 0 only, even though in some
1612 1618				 * cases (like maxstrat) the lun 0 device
1613 1619				 * type may be 0x3f (invalid), is that
1614 1620				 * for bridge boxes the target appears
1615 1621				 * as luns, and the first lun could be
1616 1622				 * a device that the utility may not
1617 1623				 * care about (like a tape device).
1618 1624 */
1619 1625 dev_data[i].dev_lun_cnt = ptgt->tgt_lun_cnt;
1620 1626 dev_data[i].dev_status = 0;
1621 1627 mutex_exit(&ptgt->tgt_mutex);
1622 1628
1623 1629 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
1624 1630 dev_data[i].dev0_type = DTYPE_UNKNOWN;
1625 1631 } else {
1626 1632 dev_data[i].dev0_type = plun->lun_type;
1627 1633 }
1628 1634 mutex_enter(&ptgt->tgt_mutex);
1629 1635 break;
1630 1636
1631 1637 case FCP_TGT_CREATE:
1632 1638 mutex_exit(&ptgt->tgt_mutex);
1633 1639 mutex_exit(&pptr->port_mutex);
1634 1640
1635 1641 /*
1636 1642				 * Serialize state change callbacks;
1637 1643				 * only one callback will be handled
1638 1644				 * at a time.
1639 1645 */
1640 1646 mutex_enter(&fcp_global_mutex);
1641 1647 if (fcp_oflag & FCP_BUSY) {
1642 1648 mutex_exit(&fcp_global_mutex);
1643 1649 if (dev_data) {
1644 1650 kmem_free(dev_data,
1645 1651 sizeof (*dev_data) *
1646 1652 fioctl.listlen);
1647 1653 }
1648 1654 return (EBUSY);
1649 1655 }
1650 1656 fcp_oflag |= FCP_BUSY;
1651 1657 mutex_exit(&fcp_global_mutex);
1652 1658
1653 1659 dev_data[i].dev_status =
1654 1660 fcp_create_on_demand(pptr,
1655 1661 wwn_ptr->raw_wwn);
1656 1662
1657 1663 if (dev_data[i].dev_status != 0) {
1658 1664 char buf[25];
1659 1665
1660 1666 for (i = 0; i < FC_WWN_SIZE; i++) {
1661 1667 (void) sprintf(&buf[i << 1],
1662 1668 "%02x",
1663 1669 wwn_ptr->raw_wwn[i]);
1664 1670 }
1665 1671
1666 1672 fcp_log(CE_WARN, pptr->port_dip,
1667 1673 "!Failed to create nodes for"
1668 1674 " pwwn=%s; error=%x", buf,
1669 1675 dev_data[i].dev_status);
1670 1676 }
1671 1677
1672 1678				/* allow state change callbacks again */
1673 1679 mutex_enter(&fcp_global_mutex);
1674 1680 fcp_oflag &= ~FCP_BUSY;
1675 1681 mutex_exit(&fcp_global_mutex);
1676 1682
1677 1683 mutex_enter(&pptr->port_mutex);
1678 1684 mutex_enter(&ptgt->tgt_mutex);
1679 1685
1680 1686 break;
1681 1687
1682 1688 case FCP_TGT_DELETE:
1683 1689 break;
1684 1690
1685 1691 default:
1686 1692 fcp_log(CE_WARN, pptr->port_dip,
1687 1693 "!Invalid device data ioctl "
1688 1694 "opcode = 0x%x", cmd);
1689 1695 }
1690 1696 mutex_exit(&ptgt->tgt_mutex);
1691 1697 }
1692 1698 }
1693 1699 mutex_exit(&pptr->port_mutex);
1694 1700
1695 1701 if (ddi_copyout(dev_data, fioctl.list,
1696 1702 (sizeof (struct device_data)) * fioctl.listlen, mode)) {
1697 1703 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1698 1704 return (EFAULT);
1699 1705 }
1700 1706 kmem_free(dev_data, sizeof (*dev_data) * fioctl.listlen);
1701 1707
1702 1708 #ifdef _MULTI_DATAMODEL
1703 1709 switch (ddi_model_convert_from(mode & FMODELS)) {
1704 1710 case DDI_MODEL_ILP32: {
1705 1711 struct fcp32_ioctl f32_ioctl;
1706 1712
1707 1713 f32_ioctl.fp_minor = fioctl.fp_minor;
1708 1714 f32_ioctl.listlen = fioctl.listlen;
1709 1715 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1710 1716 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1711 1717 sizeof (struct fcp32_ioctl), mode)) {
1712 1718 return (EFAULT);
1713 1719 }
1714 1720 break;
1715 1721 }
1716 1722 case DDI_MODEL_NONE:
1717 1723 if (ddi_copyout((void *)&fioctl, (void *)data,
1718 1724 sizeof (struct fcp_ioctl), mode)) {
1719 1725 return (EFAULT);
1720 1726 }
1721 1727 break;
1722 1728 }
1723 1729 #else /* _MULTI_DATAMODEL */
1724 1730
1725 1731 if (ddi_copyout((void *)&fioctl, (void *)data,
1726 1732 sizeof (struct fcp_ioctl), mode)) {
1727 1733 return (EFAULT);
1728 1734 }
1729 1735 #endif /* _MULTI_DATAMODEL */
1730 1736
1731 1737 return (0);
1732 1738 }
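/*
 * Illustrative userland sketch of the FCP_TGT_INQUIRY request handled
 * above.  The fcp_ioctl fields (fp_minor, listlen, list) and the
 * device_data fields (dev_pwwn, dev_status, dev0_type, dev_lun_cnt) are
 * taken from the code above; the control-node path and the include path
 * for the ioctl definitions are assumptions and may differ on a given
 * system.
 */
#include <sys/types.h>
#include <sys/fibre-channel/ulp/fcp_util.h>	/* assumed ioctl definitions */
#include <fcntl.h>
#include <stropts.h>
#include <string.h>
#include <unistd.h>

static int
fcp_tgt_inquiry(int port_instance, const uint8_t pwwn[8],
    struct device_data *dd)
{
	struct fcp_ioctl fioctl;
	int fd, rv;

	/* assumed path of the fcp control node */
	if ((fd = open("/devices/pseudo/fcp@0:fcp", O_RDONLY)) < 0)
		return (-1);

	(void) memset(dd, 0, sizeof (*dd));
	(void) memcpy(&dd->dev_pwwn, pwwn, sizeof (dd->dev_pwwn));

	fioctl.fp_minor = port_instance;	/* fp port instance number */
	fioctl.listlen = 1;			/* one device_data element */
	fioctl.list = (caddr_t)dd;

	rv = ioctl(fd, FCP_TGT_INQUIRY, &fioctl);
	(void) close(fd);

	/* on success dd->dev_status, dd->dev0_type, dd->dev_lun_cnt are set */
	return (rv);
}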
1733 1739
1734 1740 /*
1735 1741 * Fetch the target mappings (path, etc.) for all LUNs
1736 1742 * on this port.
1737 1743 */
1738 1744 /* ARGSUSED */
1739 1745 static int
1740 1746 fcp_get_target_mappings(struct fcp_ioctl *data,
1741 1747 int mode, int *rval)
1742 1748 {
1743 1749 struct fcp_port *pptr;
1744 1750 fc_hba_target_mappings_t *mappings;
1745 1751 fc_hba_mapping_entry_t *map;
1746 1752 struct fcp_tgt *ptgt = NULL;
1747 1753 struct fcp_lun *plun = NULL;
1748 1754 int i, mapIndex, mappingSize;
1749 1755 int listlen;
1750 1756 struct fcp_ioctl fioctl;
1751 1757 char *path;
1752 1758 fcp_ent_addr_t sam_lun_addr;
1753 1759
1754 1760 #ifdef _MULTI_DATAMODEL
1755 1761 switch (ddi_model_convert_from(mode & FMODELS)) {
1756 1762 case DDI_MODEL_ILP32: {
1757 1763 struct fcp32_ioctl f32_ioctl;
1758 1764
1759 1765 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
1760 1766 sizeof (struct fcp32_ioctl), mode)) {
1761 1767 return (EFAULT);
1762 1768 }
1763 1769 fioctl.fp_minor = f32_ioctl.fp_minor;
1764 1770 fioctl.listlen = f32_ioctl.listlen;
1765 1771 fioctl.list = (caddr_t)(long)f32_ioctl.list;
1766 1772 break;
1767 1773 }
1768 1774 case DDI_MODEL_NONE:
1769 1775 if (ddi_copyin((void *)data, (void *)&fioctl,
1770 1776 sizeof (struct fcp_ioctl), mode)) {
1771 1777 return (EFAULT);
1772 1778 }
1773 1779 break;
1774 1780 }
1775 1781
1776 1782 #else /* _MULTI_DATAMODEL */
1777 1783 if (ddi_copyin((void *)data, (void *)&fioctl,
1778 1784 sizeof (struct fcp_ioctl), mode)) {
1779 1785 return (EFAULT);
1780 1786 }
1781 1787 #endif /* _MULTI_DATAMODEL */
1782 1788
1783 1789 /*
1784 1790 * Right now we can assume that the minor number matches with
1785 1791 * this instance of fp. If this changes we will need to
1786 1792 * revisit this logic.
1787 1793 */
1788 1794 mutex_enter(&fcp_global_mutex);
1789 1795 pptr = fcp_port_head;
1790 1796 while (pptr) {
1791 1797 if (pptr->port_instance == (uint32_t)fioctl.fp_minor) {
1792 1798 break;
1793 1799 } else {
1794 1800 pptr = pptr->port_next;
1795 1801 }
1796 1802 }
1797 1803 mutex_exit(&fcp_global_mutex);
1798 1804 if (pptr == NULL) {
1799 1805 cmn_err(CE_NOTE, "target mappings: unknown instance number: %d",
1800 1806 fioctl.fp_minor);
1801 1807 return (ENXIO);
1802 1808 }
1803 1809
1804 1810
1805 1811 /* We use listlen to show the total buffer size */
1806 1812 mappingSize = fioctl.listlen;
1807 1813
1808 1814 /* Now calculate how many mapping entries will fit */
1809 1815 listlen = fioctl.listlen + sizeof (fc_hba_mapping_entry_t)
1810 1816 - sizeof (fc_hba_target_mappings_t);
1811 1817 if (listlen <= 0) {
1812 1818 cmn_err(CE_NOTE, "target mappings: Insufficient buffer");
1813 1819 return (ENXIO);
1814 1820 }
1815 1821 listlen = listlen / sizeof (fc_hba_mapping_entry_t);
1816 1822
1817 1823 if ((mappings = kmem_zalloc(mappingSize, KM_SLEEP)) == NULL) {
1818 1824 return (ENOMEM);
1819 1825 }
1820 1826 mappings->version = FC_HBA_TARGET_MAPPINGS_VERSION;
1821 1827
1822 1828 /* Now get to work */
1823 1829 mapIndex = 0;
1824 1830
1825 1831 mutex_enter(&pptr->port_mutex);
1826 1832 /* Loop through all targets on this port */
1827 1833 for (i = 0; i < FCP_NUM_HASH; i++) {
1828 1834 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
1829 1835 ptgt = ptgt->tgt_next) {
1830 1836
1831 1837 mutex_enter(&ptgt->tgt_mutex);
1832 1838
1833 1839 /* Loop through all LUNs on this target */
1834 1840 for (plun = ptgt->tgt_lun; plun != NULL;
1835 1841 plun = plun->lun_next) {
1836 1842 if (plun->lun_state & FCP_LUN_OFFLINE) {
1837 1843 continue;
1838 1844 }
1839 1845
1840 1846 path = fcp_get_lun_path(plun);
1841 1847 if (path == NULL) {
1842 1848 continue;
1843 1849 }
1844 1850
1845 1851 if (mapIndex >= listlen) {
1846 1852 mapIndex ++;
1847 1853 kmem_free(path, MAXPATHLEN);
1848 1854 continue;
1849 1855 }
1850 1856 map = &mappings->entries[mapIndex++];
1851 1857 bcopy(path, map->targetDriver,
1852 1858 sizeof (map->targetDriver));
1853 1859 map->d_id = ptgt->tgt_d_id;
1854 1860 map->busNumber = 0;
1855 1861 map->targetNumber = ptgt->tgt_d_id;
1856 1862 map->osLUN = plun->lun_num;
1857 1863
1858 1864 /*
1859 1865				 * We byte-swapped the lun when we stored it in
1860 1866				 * lun_addr. We need to swap it back before
1861 1867				 * returning it to user land.
1862 1868 */
1863 1869
1864 1870 sam_lun_addr.ent_addr_0 =
1865 1871 BE_16(plun->lun_addr.ent_addr_0);
1866 1872 sam_lun_addr.ent_addr_1 =
1867 1873 BE_16(plun->lun_addr.ent_addr_1);
1868 1874 sam_lun_addr.ent_addr_2 =
1869 1875 BE_16(plun->lun_addr.ent_addr_2);
1870 1876 sam_lun_addr.ent_addr_3 =
1871 1877 BE_16(plun->lun_addr.ent_addr_3);
1872 1878
1873 1879 bcopy(&sam_lun_addr, &map->samLUN,
1874 1880 FCP_LUN_SIZE);
1875 1881 bcopy(ptgt->tgt_node_wwn.raw_wwn,
1876 1882 map->NodeWWN.raw_wwn, sizeof (la_wwn_t));
1877 1883 bcopy(ptgt->tgt_port_wwn.raw_wwn,
1878 1884 map->PortWWN.raw_wwn, sizeof (la_wwn_t));
1879 1885
1880 1886 if (plun->lun_guid) {
1881 1887
1882 1888 /* convert ascii wwn to bytes */
1883 1889 fcp_ascii_to_wwn(plun->lun_guid,
1884 1890 map->guid, sizeof (map->guid));
1885 1891
1886 1892 if ((sizeof (map->guid)) <
1887 1893 plun->lun_guid_size / 2) {
1888 1894 cmn_err(CE_WARN,
1889 1895 "fcp_get_target_mappings:"
1890 1896 "guid copy space "
1891 1897 "insufficient."
1892 1898 "Copy Truncation - "
1893 1899 "available %d; need %d",
1894 1900 (int)sizeof (map->guid),
1895 1901 (int)
1896 1902 plun->lun_guid_size / 2);
1897 1903 }
1898 1904 }
1899 1905 kmem_free(path, MAXPATHLEN);
1900 1906 }
1901 1907 mutex_exit(&ptgt->tgt_mutex);
1902 1908 }
1903 1909 }
1904 1910 mutex_exit(&pptr->port_mutex);
1905 1911 mappings->numLuns = mapIndex;
1906 1912
1907 1913 if (ddi_copyout(mappings, fioctl.list, mappingSize, mode)) {
1908 1914 kmem_free(mappings, mappingSize);
1909 1915 return (EFAULT);
1910 1916 }
1911 1917 kmem_free(mappings, mappingSize);
1912 1918
1913 1919 #ifdef _MULTI_DATAMODEL
1914 1920 switch (ddi_model_convert_from(mode & FMODELS)) {
1915 1921 case DDI_MODEL_ILP32: {
1916 1922 struct fcp32_ioctl f32_ioctl;
1917 1923
1918 1924 f32_ioctl.fp_minor = fioctl.fp_minor;
1919 1925 f32_ioctl.listlen = fioctl.listlen;
1920 1926 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
1921 1927 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
1922 1928 sizeof (struct fcp32_ioctl), mode)) {
1923 1929 return (EFAULT);
1924 1930 }
1925 1931 break;
1926 1932 }
1927 1933 case DDI_MODEL_NONE:
1928 1934 if (ddi_copyout((void *)&fioctl, (void *)data,
1929 1935 sizeof (struct fcp_ioctl), mode)) {
1930 1936 return (EFAULT);
1931 1937 }
1932 1938 break;
1933 1939 }
1934 1940 #else /* _MULTI_DATAMODEL */
1935 1941
1936 1942 if (ddi_copyout((void *)&fioctl, (void *)data,
1937 1943 sizeof (struct fcp_ioctl), mode)) {
1938 1944 return (EFAULT);
1939 1945 }
1940 1946 #endif /* _MULTI_DATAMODEL */
1941 1947
1942 1948 return (0);
1943 1949 }
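/*
 * Sketch of how a caller would size the buffer for FCP_GET_TARGET_MAPPINGS,
 * mirroring the arithmetic above: fioctl.listlen carries the total byte
 * size of the buffer, which holds one fc_hba_target_mappings_t header
 * (including its first entry) plus (n - 1) additional
 * fc_hba_mapping_entry_t records.  The returned numLuns counts every
 * mapping found, so a numLuns larger than the entry capacity means the
 * caller should retry with a bigger buffer.
 */
static size_t
fcp_mappings_bufsize(int nentries)
{
	return (sizeof (fc_hba_target_mappings_t) +
	    (nentries - 1) * sizeof (fc_hba_mapping_entry_t));
}
/*
 * The caller then sets fioctl.listlen = fcp_mappings_bufsize(n) and points
 * fioctl.list at the allocated buffer before issuing the ioctl.
 */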
1944 1950
1945 1951 /*
1946 1952 * fcp_setup_scsi_ioctl
1947 1953 * Setup handler for the "scsi passthru" style of
1948 1954 * ioctl for FCP. See "fcp_util.h" for data structure
1949 1955 * definition.
1950 1956 *
1951 1957 * Input:
1952 1958 * u_fscsi = ioctl data (user address space)
1953 1959 * mode = See ioctl(9E)
1954 1960 *
1955 1961 * Output:
1956 1962 * u_fscsi = ioctl data (user address space)
1957 1963 * rval = return value - see ioctl(9E)
1958 1964 *
1959 1965 * Returns:
1960 1966 * 0 = OK
1961 1967 * EAGAIN = See errno.h
1962 1968 * EBUSY = See errno.h
1963 1969 * EFAULT = See errno.h
1964 1970 * EINTR = See errno.h
1965 1971 * EINVAL = See errno.h
1966 1972 * EIO = See errno.h
1967 1973 * ENOMEM = See errno.h
1968 1974 * ENXIO = See errno.h
1969 1975 *
1970 1976 * Context:
1971 1977 * Kernel context.
1972 1978 */
1973 1979 /* ARGSUSED */
1974 1980 static int
1975 1981 fcp_setup_scsi_ioctl(struct fcp_scsi_cmd *u_fscsi,
1976 1982 int mode, int *rval)
1977 1983 {
1978 1984 int ret = 0;
1979 1985 int temp_ret;
1980 1986 caddr_t k_cdbbufaddr = NULL;
1981 1987 caddr_t k_bufaddr = NULL;
1982 1988 caddr_t k_rqbufaddr = NULL;
1983 1989 caddr_t u_cdbbufaddr;
1984 1990 caddr_t u_bufaddr;
1985 1991 caddr_t u_rqbufaddr;
1986 1992 struct fcp_scsi_cmd k_fscsi;
1987 1993
1988 1994 /*
1989 1995 * Get fcp_scsi_cmd array element from user address space
1990 1996 */
1991 1997 if ((ret = fcp_copyin_scsi_cmd((caddr_t)u_fscsi, &k_fscsi, mode))
1992 1998 != 0) {
1993 1999 return (ret);
1994 2000 }
1995 2001
1996 2002
1997 2003 /*
1998 2004 * Even though kmem_alloc() checks the validity of the
1999 2005	 * buffer length, this check is needed for the case where
2000 2006	 * kmem flags are set and a zero buffer length is passed.
2001 2007 */
2002 2008 if ((k_fscsi.scsi_cdblen <= 0) ||
2003 2009 (k_fscsi.scsi_buflen <= 0) ||
2004 2010 (k_fscsi.scsi_rqlen <= 0)) {
2005 2011 return (EINVAL);
2006 2012 }
2007 2013
2008 2014 /*
2009 2015 * Allocate data for fcp_scsi_cmd pointer fields
2010 2016 */
2011 2017 if (ret == 0) {
2012 2018 k_cdbbufaddr = kmem_alloc(k_fscsi.scsi_cdblen, KM_NOSLEEP);
2013 2019 k_bufaddr = kmem_alloc(k_fscsi.scsi_buflen, KM_NOSLEEP);
2014 2020 k_rqbufaddr = kmem_alloc(k_fscsi.scsi_rqlen, KM_NOSLEEP);
2015 2021
2016 2022 if (k_cdbbufaddr == NULL ||
2017 2023 k_bufaddr == NULL ||
2018 2024 k_rqbufaddr == NULL) {
2019 2025 ret = ENOMEM;
2020 2026 }
2021 2027 }
2022 2028
2023 2029 /*
2024 2030 * Get fcp_scsi_cmd pointer fields from user
2025 2031 * address space
2026 2032 */
2027 2033 if (ret == 0) {
2028 2034 u_cdbbufaddr = k_fscsi.scsi_cdbbufaddr;
2029 2035 u_bufaddr = k_fscsi.scsi_bufaddr;
2030 2036 u_rqbufaddr = k_fscsi.scsi_rqbufaddr;
2031 2037
2032 2038 if (ddi_copyin(u_cdbbufaddr,
2033 2039 k_cdbbufaddr,
2034 2040 k_fscsi.scsi_cdblen,
2035 2041 mode)) {
2036 2042 ret = EFAULT;
2037 2043 } else if (ddi_copyin(u_bufaddr,
2038 2044 k_bufaddr,
2039 2045 k_fscsi.scsi_buflen,
2040 2046 mode)) {
2041 2047 ret = EFAULT;
2042 2048 } else if (ddi_copyin(u_rqbufaddr,
2043 2049 k_rqbufaddr,
2044 2050 k_fscsi.scsi_rqlen,
2045 2051 mode)) {
2046 2052 ret = EFAULT;
2047 2053 }
2048 2054 }
2049 2055
2050 2056 /*
2051 2057 * Send scsi command (blocking)
2052 2058 */
2053 2059 if (ret == 0) {
2054 2060 /*
2055 2061 * Prior to sending the scsi command, the
2056 2062 * fcp_scsi_cmd data structure must contain kernel,
2057 2063 * not user, addresses.
2058 2064 */
2059 2065 k_fscsi.scsi_cdbbufaddr = k_cdbbufaddr;
2060 2066 k_fscsi.scsi_bufaddr = k_bufaddr;
2061 2067 k_fscsi.scsi_rqbufaddr = k_rqbufaddr;
2062 2068
2063 2069 ret = fcp_send_scsi_ioctl(&k_fscsi);
2064 2070
2065 2071 /*
2066 2072 * After sending the scsi command, the
2067 2073 * fcp_scsi_cmd data structure must contain user,
2068 2074 * not kernel, addresses.
2069 2075 */
2070 2076 k_fscsi.scsi_cdbbufaddr = u_cdbbufaddr;
2071 2077 k_fscsi.scsi_bufaddr = u_bufaddr;
2072 2078 k_fscsi.scsi_rqbufaddr = u_rqbufaddr;
2073 2079 }
2074 2080
2075 2081 /*
2076 2082 * Put fcp_scsi_cmd pointer fields to user address space
2077 2083 */
2078 2084 if (ret == 0) {
2079 2085 if (ddi_copyout(k_cdbbufaddr,
2080 2086 u_cdbbufaddr,
2081 2087 k_fscsi.scsi_cdblen,
2082 2088 mode)) {
2083 2089 ret = EFAULT;
2084 2090 } else if (ddi_copyout(k_bufaddr,
2085 2091 u_bufaddr,
2086 2092 k_fscsi.scsi_buflen,
2087 2093 mode)) {
2088 2094 ret = EFAULT;
2089 2095 } else if (ddi_copyout(k_rqbufaddr,
2090 2096 u_rqbufaddr,
2091 2097 k_fscsi.scsi_rqlen,
2092 2098 mode)) {
2093 2099 ret = EFAULT;
2094 2100 }
2095 2101 }
2096 2102
2097 2103 /*
2098 2104 * Free data for fcp_scsi_cmd pointer fields
2099 2105 */
2100 2106 if (k_cdbbufaddr != NULL) {
2101 2107 kmem_free(k_cdbbufaddr, k_fscsi.scsi_cdblen);
2102 2108 }
2103 2109 if (k_bufaddr != NULL) {
2104 2110 kmem_free(k_bufaddr, k_fscsi.scsi_buflen);
2105 2111 }
2106 2112 if (k_rqbufaddr != NULL) {
2107 2113 kmem_free(k_rqbufaddr, k_fscsi.scsi_rqlen);
2108 2114 }
2109 2115
2110 2116 /*
2111 2117 * Put fcp_scsi_cmd array element to user address space
2112 2118 */
2113 2119 temp_ret = fcp_copyout_scsi_cmd(&k_fscsi, (caddr_t)u_fscsi, mode);
2114 2120 if (temp_ret != 0) {
2115 2121 ret = temp_ret;
2116 2122 }
2117 2123
2118 2124 /*
2119 2125 * Return status
2120 2126 */
2121 2127 return (ret);
2122 2128 }
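/*
 * Illustrative userland sketch of the FCP_TGT_SEND_SCSI request serviced
 * above.  The field names and the constraints enforced in this file
 * (read-only data direction, non-zero cdb/data/sense lengths, a CDB no
 * larger than FCP_CDB_SIZE, an opcode present on the driver's
 * scsi_ioctl_list, and a LUN whose last six bytes are zero) come from the
 * surrounding code; the include paths, the use of SCMD_INQUIRY, and the
 * interpretation of scsi_timeout as seconds are assumptions.
 */
#include <sys/types.h>
#include <sys/scsi/generic/commands.h>		/* SCMD_INQUIRY */
#include <sys/fibre-channel/ulp/fcp_util.h>	/* assumed ioctl definitions */
#include <stropts.h>
#include <string.h>
#include <unistd.h>

static int
fcp_send_inquiry(int fd, int port_instance, const uint8_t pwwn[8],
    uint16_t lun, void *inqbuf, uint8_t inqlen, char *sense, int senselen)
{
	struct fcp_scsi_cmd fscsi;
	uint8_t *lunp;
	uint8_t cdb[6];

	(void) memset(&fscsi, 0, sizeof (fscsi));
	(void) memset(cdb, 0, sizeof (cdb));
	cdb[0] = SCMD_INQUIRY;		/* must be on scsi_ioctl_list */
	cdb[4] = inqlen;		/* INQUIRY allocation length */

	fscsi.scsi_fc_port_num = port_instance;
	(void) memcpy(&fscsi.scsi_fc_pwwn, pwwn, sizeof (fscsi.scsi_fc_pwwn));

	/* 8-byte SAM LUN: 14-bit number in the first level, rest zero */
	lunp = (uint8_t *)&fscsi.scsi_lun;
	lunp[0] = (lun >> 8) & 0x3f;
	lunp[1] = lun & 0xff;

	fscsi.scsi_timeout = 30;		/* assumed to be seconds */
	fscsi.scsi_flags = FCP_SCSI_READ;	/* only reads are accepted */
	fscsi.scsi_cdbbufaddr = (caddr_t)cdb;
	fscsi.scsi_cdblen = sizeof (cdb);
	fscsi.scsi_bufaddr = (caddr_t)inqbuf;
	fscsi.scsi_buflen = inqlen;
	fscsi.scsi_rqbufaddr = (caddr_t)sense;
	fscsi.scsi_rqlen = senselen;

	/* fd is assumed to be open on the fcp control node */
	return (ioctl(fd, FCP_TGT_SEND_SCSI, &fscsi));
}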
2123 2129
2124 2130
2125 2131 /*
2126 2132 * fcp_copyin_scsi_cmd
2127 2133 * Copy in fcp_scsi_cmd data structure from user address space.
2128 2134 * The data may be in 32 bit or 64 bit modes.
2129 2135 *
2130 2136 * Input:
2131 2137 * base_addr = from address (user address space)
2132 2138 * mode = See ioctl(9E) and ddi_copyin(9F)
2133 2139 *
2134 2140 * Output:
2135 2141 * fscsi = to address (kernel address space)
2136 2142 *
2137 2143 * Returns:
2138 2144 * 0 = OK
2139 2145 * EFAULT = Error
2140 2146 *
2141 2147 * Context:
2142 2148 * Kernel context.
2143 2149 */
2144 2150 static int
2145 2151 fcp_copyin_scsi_cmd(caddr_t base_addr, struct fcp_scsi_cmd *fscsi, int mode)
2146 2152 {
2147 2153 #ifdef _MULTI_DATAMODEL
2148 2154 struct fcp32_scsi_cmd f32scsi;
2149 2155
2150 2156 switch (ddi_model_convert_from(mode & FMODELS)) {
2151 2157 case DDI_MODEL_ILP32:
2152 2158 /*
2153 2159 * Copy data from user address space
2154 2160 */
2155 2161 if (ddi_copyin((void *)base_addr,
2156 2162 &f32scsi,
2157 2163 sizeof (struct fcp32_scsi_cmd),
2158 2164 mode)) {
2159 2165 return (EFAULT);
2160 2166 }
2161 2167 /*
2162 2168 * Convert from 32 bit to 64 bit
2163 2169 */
2164 2170 FCP32_SCSI_CMD_TO_FCP_SCSI_CMD(&f32scsi, fscsi);
2165 2171 break;
2166 2172 case DDI_MODEL_NONE:
2167 2173 /*
2168 2174 * Copy data from user address space
2169 2175 */
2170 2176 if (ddi_copyin((void *)base_addr,
2171 2177 fscsi,
2172 2178 sizeof (struct fcp_scsi_cmd),
2173 2179 mode)) {
2174 2180 return (EFAULT);
2175 2181 }
2176 2182 break;
2177 2183 }
2178 2184 #else /* _MULTI_DATAMODEL */
2179 2185 /*
2180 2186 * Copy data from user address space
2181 2187 */
2182 2188 if (ddi_copyin((void *)base_addr,
2183 2189 fscsi,
2184 2190 sizeof (struct fcp_scsi_cmd),
2185 2191 mode)) {
2186 2192 return (EFAULT);
2187 2193 }
2188 2194 #endif /* _MULTI_DATAMODEL */
2189 2195
2190 2196 return (0);
2191 2197 }
2192 2198
2193 2199
2194 2200 /*
2195 2201 * fcp_copyout_scsi_cmd
2196 2202 * Copy out fcp_scsi_cmd data structure to user address space.
2197 2203 * The data may be in 32 bit or 64 bit modes.
2198 2204 *
2199 2205 * Input:
2200 2206 * fscsi = to address (kernel address space)
2201 2207 * mode = See ioctl(9E) and ddi_copyin(9F)
2202 2208 *
2203 2209 * Output:
2204 2210 * base_addr = from address (user address space)
2205 2211 *
2206 2212 * Returns:
2207 2213 * 0 = OK
2208 2214 * EFAULT = Error
2209 2215 *
2210 2216 * Context:
2211 2217 * Kernel context.
2212 2218 */
2213 2219 static int
2214 2220 fcp_copyout_scsi_cmd(struct fcp_scsi_cmd *fscsi, caddr_t base_addr, int mode)
2215 2221 {
2216 2222 #ifdef _MULTI_DATAMODEL
2217 2223 struct fcp32_scsi_cmd f32scsi;
2218 2224
2219 2225 switch (ddi_model_convert_from(mode & FMODELS)) {
2220 2226 case DDI_MODEL_ILP32:
2221 2227 /*
2222 2228 * Convert from 64 bit to 32 bit
2223 2229 */
2224 2230 FCP_SCSI_CMD_TO_FCP32_SCSI_CMD(fscsi, &f32scsi);
2225 2231 /*
2226 2232 * Copy data to user address space
2227 2233 */
2228 2234 if (ddi_copyout(&f32scsi,
2229 2235 (void *)base_addr,
2230 2236 sizeof (struct fcp32_scsi_cmd),
2231 2237 mode)) {
2232 2238 return (EFAULT);
2233 2239 }
2234 2240 break;
2235 2241 case DDI_MODEL_NONE:
2236 2242 /*
2237 2243 * Copy data to user address space
2238 2244 */
2239 2245 if (ddi_copyout(fscsi,
2240 2246 (void *)base_addr,
2241 2247 sizeof (struct fcp_scsi_cmd),
2242 2248 mode)) {
2243 2249 return (EFAULT);
2244 2250 }
2245 2251 break;
2246 2252 }
2247 2253 #else /* _MULTI_DATAMODEL */
2248 2254 /*
2249 2255 * Copy data to user address space
2250 2256 */
2251 2257 if (ddi_copyout(fscsi,
2252 2258 (void *)base_addr,
2253 2259 sizeof (struct fcp_scsi_cmd),
2254 2260 mode)) {
2255 2261 return (EFAULT);
2256 2262 }
2257 2263 #endif /* _MULTI_DATAMODEL */
2258 2264
2259 2265 return (0);
2260 2266 }
2261 2267
2262 2268
2263 2269 /*
2264 2270 * fcp_send_scsi_ioctl
2265 2271 * Sends the SCSI command in blocking mode.
2266 2272 *
2267 2273 * Input:
2268 2274 * fscsi = SCSI command data structure
2269 2275 *
2270 2276 * Output:
2271 2277 * fscsi = SCSI command data structure
2272 2278 *
2273 2279 * Returns:
2274 2280 * 0 = OK
2275 2281 * EAGAIN = See errno.h
2276 2282 * EBUSY = See errno.h
2277 2283 * EINTR = See errno.h
2278 2284 * EINVAL = See errno.h
2279 2285 * EIO = See errno.h
2280 2286 * ENOMEM = See errno.h
2281 2287 * ENXIO = See errno.h
2282 2288 *
2283 2289 * Context:
2284 2290 * Kernel context.
2285 2291 */
2286 2292 static int
2287 2293 fcp_send_scsi_ioctl(struct fcp_scsi_cmd *fscsi)
2288 2294 {
2289 2295 struct fcp_lun *plun = NULL;
2290 2296 struct fcp_port *pptr = NULL;
2291 2297 struct fcp_tgt *ptgt = NULL;
2292 2298 fc_packet_t *fpkt = NULL;
2293 2299 struct fcp_ipkt *icmd = NULL;
2294 2300 int target_created = FALSE;
2295 2301 fc_frame_hdr_t *hp;
2296 2302 struct fcp_cmd fcp_cmd;
2297 2303 struct fcp_cmd *fcmd;
2298 2304 union scsi_cdb *scsi_cdb;
2299 2305 la_wwn_t *wwn_ptr;
2300 2306 int nodma;
2301 2307 struct fcp_rsp *rsp;
2302 2308 struct fcp_rsp_info *rsp_info;
2303 2309 caddr_t rsp_sense;
2304 2310 int buf_len;
2305 2311 int info_len;
2306 2312 int sense_len;
2307 2313 struct scsi_extended_sense *sense_to = NULL;
2308 2314 timeout_id_t tid;
2309 2315 uint8_t reconfig_lun = FALSE;
2310 2316 uint8_t reconfig_pending = FALSE;
2311 2317 uint8_t scsi_cmd;
2312 2318 int rsp_len;
2313 2319 int cmd_index;
2314 2320 int fc_status;
2315 2321 int pkt_state;
2316 2322 int pkt_action;
2317 2323 int pkt_reason;
2318 2324 int ret, xport_retval = ~FC_SUCCESS;
2319 2325 int lcount;
2320 2326 int tcount;
2321 2327 int reconfig_status;
2322 2328 int port_busy = FALSE;
2323 2329 uchar_t *lun_string;
2324 2330
2325 2331 /*
2326 2332 * Check valid SCSI command
2327 2333 */
2328 2334 scsi_cmd = ((uint8_t *)fscsi->scsi_cdbbufaddr)[0];
2329 2335 ret = EINVAL;
2330 2336 for (cmd_index = 0;
2331 2337 cmd_index < FCP_NUM_ELEMENTS(scsi_ioctl_list) &&
2332 2338 ret != 0;
2333 2339 cmd_index++) {
2334 2340 /*
2335 2341 * First byte of CDB is the SCSI command
2336 2342 */
2337 2343 if (scsi_ioctl_list[cmd_index] == scsi_cmd) {
2338 2344 ret = 0;
2339 2345 }
2340 2346 }
2341 2347
2342 2348 /*
2343 2349 * Check inputs
2344 2350 */
2345 2351 if (fscsi->scsi_flags != FCP_SCSI_READ) {
2346 2352 ret = EINVAL;
2347 2353 } else if (fscsi->scsi_cdblen > FCP_CDB_SIZE) {
2348 2354 /* no larger than */
2349 2355 ret = EINVAL;
2350 2356 }
2351 2357
2352 2358
2353 2359 /*
2354 2360 * Find FC port
2355 2361 */
2356 2362 if (ret == 0) {
2357 2363 /*
2358 2364 * Acquire global mutex
2359 2365 */
2360 2366 mutex_enter(&fcp_global_mutex);
2361 2367
2362 2368 pptr = fcp_port_head;
2363 2369 while (pptr) {
2364 2370 if (pptr->port_instance ==
2365 2371 (uint32_t)fscsi->scsi_fc_port_num) {
2366 2372 break;
2367 2373 } else {
2368 2374 pptr = pptr->port_next;
2369 2375 }
2370 2376 }
2371 2377
2372 2378 if (pptr == NULL) {
2373 2379 ret = ENXIO;
2374 2380 } else {
2375 2381 /*
2376 2382			 * fc_ulp_busy_port can raise power,
2377 2383			 * so we must not hold any mutexes involved in PM.
2378 2384 */
2379 2385 mutex_exit(&fcp_global_mutex);
2380 2386 ret = fc_ulp_busy_port(pptr->port_fp_handle);
2381 2387 }
2382 2388
2383 2389 if (ret == 0) {
2384 2390
2385 2391 /* remember port is busy, so we will release later */
2386 2392 port_busy = TRUE;
2387 2393
2388 2394 /*
2389 2395 * If there is a reconfiguration in progress, wait
2390 2396 * for it to complete.
2391 2397 */
2392 2398
2393 2399 fcp_reconfig_wait(pptr);
2394 2400
2395 2401 /* reacquire mutexes in order */
2396 2402 mutex_enter(&fcp_global_mutex);
2397 2403 mutex_enter(&pptr->port_mutex);
2398 2404
2399 2405 /*
2400 2406 * Will port accept DMA?
2401 2407 */
2402 2408 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE)
2403 2409 ? 1 : 0;
2404 2410
2405 2411 /*
2406 2412 * If init or offline, device not known
2407 2413 *
2408 2414			 * If we are discovering (onlining), we
2409 2415			 * obviously can NOT provide reliable data about
2410 2416			 * devices until discovery is complete.
2411 2417 */
2412 2418 if (pptr->port_state & (FCP_STATE_INIT |
2413 2419 FCP_STATE_OFFLINE)) {
2414 2420 ret = ENXIO;
2415 2421 } else if (pptr->port_state & FCP_STATE_ONLINING) {
2416 2422 ret = EBUSY;
2417 2423 } else {
2418 2424 /*
2419 2425 * Find target from pwwn
2420 2426 *
2421 2427 * The wwn must be put into a local
2422 2428 * variable to ensure alignment.
2423 2429 */
2424 2430 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2425 2431 ptgt = fcp_lookup_target(pptr,
2426 2432 (uchar_t *)wwn_ptr);
2427 2433
2428 2434 /*
2429 2435 * If target not found,
2430 2436 */
2431 2437 if (ptgt == NULL) {
2432 2438 /*
2433 2439 * Note: Still have global &
2434 2440 * port mutexes
2435 2441 */
2436 2442 mutex_exit(&pptr->port_mutex);
2437 2443 ptgt = fcp_port_create_tgt(pptr,
2438 2444 wwn_ptr, &ret, &fc_status,
2439 2445 &pkt_state, &pkt_action,
2440 2446 &pkt_reason);
2441 2447 mutex_enter(&pptr->port_mutex);
2442 2448
2443 2449 fscsi->scsi_fc_status = fc_status;
2444 2450 fscsi->scsi_pkt_state =
2445 2451 (uchar_t)pkt_state;
2446 2452 fscsi->scsi_pkt_reason = pkt_reason;
2447 2453 fscsi->scsi_pkt_action =
2448 2454 (uchar_t)pkt_action;
2449 2455
2450 2456 if (ptgt != NULL) {
2451 2457 target_created = TRUE;
2452 2458 } else if (ret == 0) {
2453 2459 ret = ENOMEM;
2454 2460 }
2455 2461 }
2456 2462
2457 2463 if (ret == 0) {
2458 2464 /*
2459 2465 * Acquire target
2460 2466 */
2461 2467 mutex_enter(&ptgt->tgt_mutex);
2462 2468
2463 2469 /*
2464 2470				 * If the target is marked or busy,
2465 2471				 * then the target cannot be used.
2466 2472 */
2467 2473 if (ptgt->tgt_state &
2468 2474 (FCP_TGT_MARK |
2469 2475 FCP_TGT_BUSY)) {
2470 2476 ret = EBUSY;
2471 2477 } else {
2472 2478 /*
2473 2479 * Mark target as busy
2474 2480 */
2475 2481 ptgt->tgt_state |=
2476 2482 FCP_TGT_BUSY;
2477 2483 }
2478 2484
2479 2485 /*
2480 2486 * Release target
2481 2487 */
2482 2488 lcount = pptr->port_link_cnt;
2483 2489 tcount = ptgt->tgt_change_cnt;
2484 2490 mutex_exit(&ptgt->tgt_mutex);
2485 2491 }
2486 2492 }
2487 2493
2488 2494 /*
2489 2495 * Release port
2490 2496 */
2491 2497 mutex_exit(&pptr->port_mutex);
2492 2498 }
2493 2499
2494 2500 /*
2495 2501 * Release global mutex
2496 2502 */
2497 2503 mutex_exit(&fcp_global_mutex);
2498 2504 }
2499 2505
2500 2506 if (ret == 0) {
2501 2507 uint64_t belun = BE_64(fscsi->scsi_lun);
2502 2508
2503 2509 /*
2504 2510 * If it's a target device, find lun from pwwn
2505 2511 * The wwn must be put into a local
2506 2512 * variable to ensure alignment.
2507 2513 */
2508 2514 mutex_enter(&pptr->port_mutex);
2509 2515 wwn_ptr = (la_wwn_t *)&(fscsi->scsi_fc_pwwn);
2510 2516 if (!ptgt->tgt_tcap && ptgt->tgt_icap) {
2511 2517 /* this is not a target */
2512 2518 fscsi->scsi_fc_status = FC_DEVICE_NOT_TGT;
2513 2519 ret = ENXIO;
2514 2520 } else if ((belun << 16) != 0) {
2515 2521 /*
2516 2522		 * Since fcp only supports the PD and LU addressing methods
2517 2523 * so far, the last 6 bytes of a valid LUN are expected
2518 2524 * to be filled with 00h.
2519 2525 */
2520 2526 fscsi->scsi_fc_status = FC_INVALID_LUN;
2521 2527 cmn_err(CE_WARN, "fcp: Unsupported LUN addressing"
2522 2528 " method 0x%02x with LUN number 0x%016" PRIx64,
2523 2529 (uint8_t)(belun >> 62), belun);
2524 2530 ret = ENXIO;
2525 2531 } else if ((plun = fcp_lookup_lun(pptr, (uchar_t *)wwn_ptr,
2526 2532 (uint16_t)((belun >> 48) & 0x3fff))) == NULL) {
2527 2533 /*
2528 2534 * This is a SCSI target, but no LUN at this
2529 2535 * address.
2530 2536 *
2531 2537 * In the future, we may want to send this to
2532 2538 * the target, and let it respond
2533 2539 * appropriately
2534 2540 */
2535 2541 ret = ENXIO;
2536 2542 }
2537 2543 mutex_exit(&pptr->port_mutex);
2538 2544 }
2539 2545
2540 2546 /*
2541 2547 * Finished grabbing external resources
2542 2548 * Allocate internal packet (icmd)
2543 2549 */
2544 2550 if (ret == 0) {
2545 2551 /*
2546 2552 * Calc rsp len assuming rsp info included
2547 2553 */
2548 2554 rsp_len = sizeof (struct fcp_rsp) +
2549 2555 sizeof (struct fcp_rsp_info) + fscsi->scsi_rqlen;
2550 2556
2551 2557 icmd = fcp_icmd_alloc(pptr, ptgt,
2552 2558 sizeof (struct fcp_cmd),
2553 2559 rsp_len,
2554 2560 fscsi->scsi_buflen,
2555 2561 nodma,
2556 2562 lcount, /* ipkt_link_cnt */
2557 2563 tcount, /* ipkt_change_cnt */
2558 2564 0, /* cause */
2559 2565 FC_INVALID_RSCN_COUNT); /* invalidate the count */
2560 2566
2561 2567 if (icmd == NULL) {
2562 2568 ret = ENOMEM;
2563 2569 } else {
2564 2570 /*
2565 2571 * Setup internal packet as sema sync
2566 2572 */
2567 2573 fcp_ipkt_sema_init(icmd);
2568 2574 }
2569 2575 }
2570 2576
2571 2577 if (ret == 0) {
2572 2578 /*
2573 2579 * Init fpkt pointer for use.
2574 2580 */
2575 2581
2576 2582 fpkt = icmd->ipkt_fpkt;
2577 2583
2578 2584 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
2579 2585 fpkt->pkt_tran_type = FC_PKT_FCP_READ; /* only rd for now */
2580 2586 fpkt->pkt_timeout = fscsi->scsi_timeout;
2581 2587
2582 2588 /*
2583 2589 * Init fcmd pointer for use by SCSI command
2584 2590 */
2585 2591
2586 2592 if (nodma) {
2587 2593 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
2588 2594 } else {
2589 2595 fcmd = &fcp_cmd;
2590 2596 }
2591 2597 bzero(fcmd, sizeof (struct fcp_cmd));
2592 2598 ptgt = plun->lun_tgt;
2593 2599
2594 2600 lun_string = (uchar_t *)&fscsi->scsi_lun;
2595 2601
2596 2602 fcmd->fcp_ent_addr.ent_addr_0 =
2597 2603 BE_16(*(uint16_t *)&(lun_string[0]));
2598 2604 fcmd->fcp_ent_addr.ent_addr_1 =
2599 2605 BE_16(*(uint16_t *)&(lun_string[2]));
2600 2606 fcmd->fcp_ent_addr.ent_addr_2 =
2601 2607 BE_16(*(uint16_t *)&(lun_string[4]));
2602 2608 fcmd->fcp_ent_addr.ent_addr_3 =
2603 2609 BE_16(*(uint16_t *)&(lun_string[6]));
2604 2610
2605 2611 /*
2606 2612 * Setup internal packet(icmd)
2607 2613 */
2608 2614 icmd->ipkt_lun = plun;
2609 2615 icmd->ipkt_restart = 0;
2610 2616 icmd->ipkt_retries = 0;
2611 2617 icmd->ipkt_opcode = 0;
2612 2618
2613 2619 /*
2614 2620 * Init the frame HEADER Pointer for use
2615 2621 */
2616 2622 hp = &fpkt->pkt_cmd_fhdr;
2617 2623
2618 2624 hp->s_id = pptr->port_id;
2619 2625 hp->d_id = ptgt->tgt_d_id;
2620 2626 hp->r_ctl = R_CTL_COMMAND;
2621 2627 hp->type = FC_TYPE_SCSI_FCP;
2622 2628 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
2623 2629 hp->rsvd = 0;
2624 2630 hp->seq_id = 0;
2625 2631 hp->seq_cnt = 0;
2626 2632 hp->ox_id = 0xffff;
2627 2633 hp->rx_id = 0xffff;
2628 2634 hp->ro = 0;
2629 2635
2630 2636 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
2631 2637 fcmd->fcp_cntl.cntl_read_data = 1; /* only rd for now */
2632 2638 fcmd->fcp_cntl.cntl_write_data = 0;
2633 2639 fcmd->fcp_data_len = fscsi->scsi_buflen;
2634 2640
2635 2641 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
2636 2642 bcopy((char *)fscsi->scsi_cdbbufaddr, (char *)scsi_cdb,
2637 2643 fscsi->scsi_cdblen);
2638 2644
2639 2645 if (!nodma) {
2640 2646 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
2641 2647 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
2642 2648 }
2643 2649
2644 2650 /*
2645 2651 * Send SCSI command to FC transport
2646 2652 */
2647 2653
2648 2654 if (ret == 0) {
2649 2655 mutex_enter(&ptgt->tgt_mutex);
2650 2656
2651 2657 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
2652 2658 mutex_exit(&ptgt->tgt_mutex);
2653 2659 fscsi->scsi_fc_status = xport_retval =
2654 2660 fc_ulp_transport(pptr->port_fp_handle,
2655 2661 fpkt);
2656 2662 if (fscsi->scsi_fc_status != FC_SUCCESS) {
2657 2663 ret = EIO;
2658 2664 }
2659 2665 } else {
2660 2666 mutex_exit(&ptgt->tgt_mutex);
2661 2667 ret = EBUSY;
2662 2668 }
2663 2669 }
2664 2670 }
2665 2671
2666 2672 /*
2667 2673 * Wait for completion only if fc_ulp_transport was called and it
2668 2674	 * returned success. This is the only time the callback will happen.
2669 2675	 * Otherwise, there is no point in waiting.
2670 2676 */
2671 2677 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2672 2678 ret = fcp_ipkt_sema_wait(icmd);
2673 2679 }
2674 2680
2675 2681 /*
2676 2682 * Copy data to IOCTL data structures
2677 2683 */
2678 2684 rsp = NULL;
2679 2685 if ((ret == 0) && (xport_retval == FC_SUCCESS)) {
2680 2686 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
2681 2687
2682 2688 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
2683 2689 fcp_log(CE_WARN, pptr->port_dip,
2684 2690 "!SCSI command to d_id=0x%x lun=0x%x"
2685 2691 " failed, Bad FCP response values:"
2686 2692 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
2687 2693 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
2688 2694 ptgt->tgt_d_id, plun->lun_num,
2689 2695 rsp->reserved_0, rsp->reserved_1,
2690 2696 rsp->fcp_u.fcp_status.reserved_0,
2691 2697 rsp->fcp_u.fcp_status.reserved_1,
2692 2698 rsp->fcp_response_len, rsp->fcp_sense_len);
2693 2699
2694 2700 ret = EIO;
2695 2701 }
2696 2702 }
2697 2703
2698 2704 if ((ret == 0) && (rsp != NULL)) {
2699 2705 /*
2700 2706 * Calc response lengths
2701 2707 */
2702 2708 sense_len = 0;
2703 2709 info_len = 0;
2704 2710
2705 2711 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2706 2712 info_len = rsp->fcp_response_len;
2707 2713 }
2708 2714
2709 2715 rsp_info = (struct fcp_rsp_info *)
2710 2716 ((uint8_t *)rsp + sizeof (struct fcp_rsp));
2711 2717
2712 2718 /*
2713 2719 * Get SCSI status
2714 2720 */
2715 2721 fscsi->scsi_bufstatus = rsp->fcp_u.fcp_status.scsi_status;
2716 2722 /*
2717 2723 * If a lun was just added or removed and the next command
2718 2724 * comes through this interface, we need to capture the check
2719 2725 * condition so we can discover the new topology.
2720 2726 */
2721 2727 if (fscsi->scsi_bufstatus != STATUS_GOOD &&
2722 2728 rsp->fcp_u.fcp_status.sense_len_set) {
2723 2729 sense_len = rsp->fcp_sense_len;
2724 2730 rsp_sense = (caddr_t)((uint8_t *)rsp_info + info_len);
2725 2731 sense_to = (struct scsi_extended_sense *)rsp_sense;
2726 2732 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
2727 2733 (FCP_SENSE_NO_LUN(sense_to))) {
2728 2734 reconfig_lun = TRUE;
2729 2735 }
2730 2736 }
2731 2737
2732 2738 if (fscsi->scsi_bufstatus == STATUS_GOOD && (ptgt != NULL) &&
2733 2739 (reconfig_lun || (scsi_cdb->scc_cmd == SCMD_REPORT_LUN))) {
2734 2740 if (reconfig_lun == FALSE) {
2735 2741 reconfig_status =
2736 2742 fcp_is_reconfig_needed(ptgt, fpkt);
2737 2743 }
2738 2744
2739 2745 if ((reconfig_lun == TRUE) ||
2740 2746 (reconfig_status == TRUE)) {
2741 2747 mutex_enter(&ptgt->tgt_mutex);
2742 2748 if (ptgt->tgt_tid == NULL) {
2743 2749 /*
2744 2750 * Either we've been notified the
2745 2751 * REPORT_LUN data has changed, or
2746 2752 * we've determined on our own that
2747 2753 * we're out of date. Kick off
2748 2754 * rediscovery.
2749 2755 */
2750 2756 tid = timeout(fcp_reconfigure_luns,
2751 2757 (caddr_t)ptgt, drv_usectohz(1));
2752 2758
2753 2759 ptgt->tgt_tid = tid;
2754 2760 ptgt->tgt_state |= FCP_TGT_BUSY;
2755 2761 ret = EBUSY;
2756 2762 reconfig_pending = TRUE;
2757 2763 }
2758 2764 mutex_exit(&ptgt->tgt_mutex);
2759 2765 }
2760 2766 }
2761 2767
2762 2768 /*
2763 2769 * Calc residuals and buffer lengths
2764 2770 */
2765 2771
2766 2772 if (ret == 0) {
2767 2773 buf_len = fscsi->scsi_buflen;
2768 2774 fscsi->scsi_bufresid = 0;
2769 2775 if (rsp->fcp_u.fcp_status.resid_under) {
2770 2776 if (rsp->fcp_resid <= fscsi->scsi_buflen) {
2771 2777 fscsi->scsi_bufresid = rsp->fcp_resid;
2772 2778 } else {
2773 2779 cmn_err(CE_WARN, "fcp: bad residue %x "
2774 2780 "for txfer len %x", rsp->fcp_resid,
2775 2781 fscsi->scsi_buflen);
2776 2782 fscsi->scsi_bufresid =
2777 2783 fscsi->scsi_buflen;
2778 2784 }
2779 2785 buf_len -= fscsi->scsi_bufresid;
2780 2786 }
2781 2787 if (rsp->fcp_u.fcp_status.resid_over) {
2782 2788 fscsi->scsi_bufresid = -rsp->fcp_resid;
2783 2789 }
2784 2790
2785 2791 fscsi->scsi_rqresid = fscsi->scsi_rqlen - sense_len;
2786 2792 if (fscsi->scsi_rqlen < sense_len) {
2787 2793 sense_len = fscsi->scsi_rqlen;
2788 2794 }
2789 2795
2790 2796 fscsi->scsi_fc_rspcode = 0;
2791 2797 if (rsp->fcp_u.fcp_status.rsp_len_set) {
2792 2798 fscsi->scsi_fc_rspcode = rsp_info->rsp_code;
2793 2799 }
2794 2800 fscsi->scsi_pkt_state = fpkt->pkt_state;
2795 2801 fscsi->scsi_pkt_action = fpkt->pkt_action;
2796 2802 fscsi->scsi_pkt_reason = fpkt->pkt_reason;
2797 2803
2798 2804 /*
2799 2805 * Copy data and request sense
2800 2806 *
2801 2807 * Data must be copied by using the FCP_CP_IN macro.
2802 2808 * This will ensure the proper byte order since the data
2803 2809 * is being copied directly from the memory mapped
2804 2810 * device register.
2805 2811 *
2806 2812 * The response (and request sense) will be in the
2807 2813 * correct byte order. No special copy is necessary.
2808 2814 */
2809 2815
2810 2816 if (buf_len) {
2811 2817 FCP_CP_IN(fpkt->pkt_data,
2812 2818 fscsi->scsi_bufaddr,
2813 2819 fpkt->pkt_data_acc,
2814 2820 buf_len);
2815 2821 }
2816 2822 bcopy((void *)rsp_sense,
2817 2823 (void *)fscsi->scsi_rqbufaddr,
2818 2824 sense_len);
2819 2825 }
2820 2826 }
2821 2827
2822 2828 /*
2823 2829	 * Clean up transport data structures if icmd was allocated,
2824 2830	 * so that cleanup happens in the same thread that allocated icmd.
2825 2831 */
2826 2832 if (icmd != NULL) {
2827 2833 fcp_ipkt_sema_cleanup(icmd);
2828 2834 }
2829 2835
2830 2836 /* restore pm busy/idle status */
2831 2837 if (port_busy) {
2832 2838 fc_ulp_idle_port(pptr->port_fp_handle);
2833 2839 }
2834 2840
2835 2841 /*
2836 2842	 * Clean up the target. If a reconfig is pending, don't clear the BUSY
2837 2843	 * flag; it'll be cleared when the reconfig is complete.
2838 2844 */
2839 2845 if ((ptgt != NULL) && !reconfig_pending) {
2840 2846 /*
2841 2847 * If target was created,
2842 2848 */
2843 2849 if (target_created) {
2844 2850 mutex_enter(&ptgt->tgt_mutex);
2845 2851 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2846 2852 mutex_exit(&ptgt->tgt_mutex);
2847 2853 } else {
2848 2854 /*
2849 2855 * De-mark target as busy
2850 2856 */
2851 2857 mutex_enter(&ptgt->tgt_mutex);
2852 2858 ptgt->tgt_state &= ~FCP_TGT_BUSY;
2853 2859 mutex_exit(&ptgt->tgt_mutex);
2854 2860 }
2855 2861 }
2856 2862 return (ret);
2857 2863 }
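/*
 * Stand-alone sketch of the LUN validation performed above: only the
 * first level of the 8-byte SAM LUN may be non-zero, the addressing
 * method sits in the top two bits, and the 14-bit LUN number occupies the
 * rest of the first two bytes.  "belun" is the LUN viewed as a big-endian
 * 64-bit value, exactly as the code above forms it with BE_64().
 */
#include <sys/types.h>

static int
fcp_decode_ioctl_lun(uint64_t belun, uint8_t *method, uint16_t *lun_num)
{
	if ((belun << 16) != 0)
		return (-1);	/* bytes 2..7 must be zero (PD/LU only) */

	*method = (uint8_t)(belun >> 62);		/* addressing method */
	*lun_num = (uint16_t)((belun >> 48) & 0x3fff);	/* 14-bit LUN */
	return (0);
}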
2858 2864
2859 2865
2860 2866 static int
2861 2867 fcp_is_reconfig_needed(struct fcp_tgt *ptgt,
2862 2868 fc_packet_t *fpkt)
2863 2869 {
2864 2870 uchar_t *lun_string;
2865 2871 uint16_t lun_num, i;
2866 2872 int num_luns;
2867 2873 int actual_luns;
2868 2874 int num_masked_luns;
2869 2875 int lun_buflen;
2870 2876 struct fcp_lun *plun = NULL;
2871 2877 struct fcp_reportlun_resp *report_lun;
2872 2878 uint8_t reconfig_needed = FALSE;
2873 2879 uint8_t lun_exists = FALSE;
2874 2880 fcp_port_t *pptr = ptgt->tgt_port;
2875 2881
2876 2882 report_lun = kmem_zalloc(fpkt->pkt_datalen, KM_SLEEP);
2877 2883
2878 2884 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
2879 2885 fpkt->pkt_datalen);
2880 2886
2881 2887 /* get number of luns (which is supplied as LUNS * 8) */
2882 2888 num_luns = BE_32(report_lun->num_lun) >> 3;
2883 2889
2884 2890 /*
2885 2891 * Figure out exactly how many lun strings our response buffer
2886 2892 * can hold.
2887 2893 */
2888 2894 lun_buflen = (fpkt->pkt_datalen -
2889 2895 2 * sizeof (uint32_t)) / sizeof (longlong_t);
2890 2896
2891 2897 /*
2892 2898 * Is our response buffer full or not? We don't want to
2893 2899 * potentially walk beyond the number of luns we have.
2894 2900 */
2895 2901 if (num_luns <= lun_buflen) {
2896 2902 actual_luns = num_luns;
2897 2903 } else {
2898 2904 actual_luns = lun_buflen;
2899 2905 }
2900 2906
2901 2907 mutex_enter(&ptgt->tgt_mutex);
2902 2908
2903 2909 /* Scan each lun to see if we have masked it. */
2904 2910 num_masked_luns = 0;
2905 2911 if (fcp_lun_blacklist != NULL) {
2906 2912 for (i = 0; i < actual_luns; i++) {
2907 2913 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2908 2914 switch (lun_string[0] & 0xC0) {
2909 2915 case FCP_LUN_ADDRESSING:
2910 2916 case FCP_PD_ADDRESSING:
2911 2917 case FCP_VOLUME_ADDRESSING:
2912 2918 lun_num = ((lun_string[0] & 0x3F) << 8)
2913 2919 | lun_string[1];
2914 2920 if (fcp_should_mask(&ptgt->tgt_port_wwn,
2915 2921 lun_num) == TRUE) {
2916 2922 num_masked_luns++;
2917 2923 }
2918 2924 break;
2919 2925 default:
2920 2926 break;
2921 2927 }
2922 2928 }
2923 2929 }
2924 2930
2925 2931 /*
2926 2932 * The quick and easy check. If the number of LUNs reported
2927 2933 * doesn't match the number we currently know about, we need
2928 2934 * to reconfigure.
2929 2935 */
2930 2936 if (num_luns && num_luns != (ptgt->tgt_lun_cnt + num_masked_luns)) {
2931 2937 mutex_exit(&ptgt->tgt_mutex);
2932 2938 kmem_free(report_lun, fpkt->pkt_datalen);
2933 2939 return (TRUE);
2934 2940 }
2935 2941
2936 2942 /*
2937 2943 * If the quick and easy check doesn't turn up anything, we walk
2938 2944 * the list of luns from the REPORT_LUN response and look for
2939 2945 * any luns we don't know about. If we find one, we know we need
2940 2946 * to reconfigure. We will skip LUNs that are masked because of the
2941 2947 * blacklist.
2942 2948 */
2943 2949 for (i = 0; i < actual_luns; i++) {
2944 2950 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
2945 2951 lun_exists = FALSE;
2946 2952 switch (lun_string[0] & 0xC0) {
2947 2953 case FCP_LUN_ADDRESSING:
2948 2954 case FCP_PD_ADDRESSING:
2949 2955 case FCP_VOLUME_ADDRESSING:
2950 2956 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
2951 2957
2952 2958 if ((fcp_lun_blacklist != NULL) && (fcp_should_mask(
2953 2959 &ptgt->tgt_port_wwn, lun_num) == TRUE)) {
2954 2960 lun_exists = TRUE;
2955 2961 break;
2956 2962 }
2957 2963
2958 2964 for (plun = ptgt->tgt_lun; plun;
2959 2965 plun = plun->lun_next) {
2960 2966 if (plun->lun_num == lun_num) {
2961 2967 lun_exists = TRUE;
2962 2968 break;
2963 2969 }
2964 2970 }
2965 2971 break;
2966 2972 default:
2967 2973 break;
2968 2974 }
2969 2975
2970 2976 if (lun_exists == FALSE) {
2971 2977 reconfig_needed = TRUE;
2972 2978 break;
2973 2979 }
2974 2980 }
2975 2981
2976 2982 mutex_exit(&ptgt->tgt_mutex);
2977 2983 kmem_free(report_lun, fpkt->pkt_datalen);
2978 2984
2979 2985 return (reconfig_needed);
2980 2986 }
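/*
 * Stand-alone sketch of the REPORT_LUN parsing done above: the first four
 * bytes of the response give the LUN list length in bytes (each entry is
 * eight bytes, hence the divide by eight), an eight-byte header precedes
 * the list, and the LUN number is taken from the first two bytes of each
 * entry.  The driver above additionally filters on the top two addressing
 * bits (the FCP_*_ADDRESSING cases); that check is reduced to a comment
 * here to keep the sketch self-contained.
 */
#include <sys/types.h>

static int
fcp_report_luns_walk(const uint8_t *resp, size_t resplen,
    void (*cb)(uint16_t lun_num, void *arg), void *arg)
{
	uint32_t list_bytes;
	size_t num_luns, fit, i;
	const uint8_t *ent;

	if (resplen < 8)
		return (-1);

	list_bytes = ((uint32_t)resp[0] << 24) | ((uint32_t)resp[1] << 16) |
	    ((uint32_t)resp[2] << 8) | resp[3];
	num_luns = list_bytes >> 3;

	/* never walk past the end of the buffer we actually received */
	fit = (resplen - 8) / 8;
	if (num_luns > fit)
		num_luns = fit;

	for (i = 0; i < num_luns; i++) {
		ent = resp + 8 + (i * 8);
		/*
		 * A caller may wish to skip entries whose addressing method
		 * (ent[0] & 0xC0) is not one the driver accepts.
		 */
		cb((uint16_t)(((ent[0] & 0x3F) << 8) | ent[1]), arg);
	}
	return ((int)num_luns);
}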
2981 2987
2982 2988 /*
2983 2989 * This function is called by fcp_handle_page83 and uses inquiry response data
2984 2990 * stored in plun->lun_inq to determine whether or not a device is a member of
2985 2991 * the fcp_symmetric_disk_table table. We return 0 if it is in the table,
2986 2992 * otherwise 1.
2987 2993 */
2988 2994 static int
2989 2995 fcp_symmetric_device_probe(struct fcp_lun *plun)
2990 2996 {
2991 2997 struct scsi_inquiry *stdinq = &plun->lun_inq;
2992 2998 char *devidptr;
2993 2999 int i, len;
2994 3000
2995 3001 for (i = 0; i < fcp_symmetric_disk_table_size; i++) {
2996 3002 devidptr = fcp_symmetric_disk_table[i];
2997 3003 len = (int)strlen(devidptr);
2998 3004
2999 3005 if (bcmp(stdinq->inq_vid, devidptr, len) == 0) {
3000 3006 return (0);
3001 3007 }
3002 3008 }
3003 3009 return (1);
3004 3010 }
3005 3011
3006 3012
3007 3013 /*
3008 3014 * This function is called by fcp_ioctl for the FCP_STATE_COUNT ioctl.
3009 3015 * It returns the current count of state change callbacks, i.e. the
3010 3016 * value of pptr->port_link_cnt.
3011 3017 *
3012 3018 * INPUT:
3013 3019 * fcp_ioctl.fp_minor -> The minor # of the fp port
3014 3020 * fcp_ioctl.listlen -> 1
3015 3021 * fcp_ioctl.list -> Pointer to a 32 bit integer
3016 3022 */
3017 3023 /*ARGSUSED2*/
3018 3024 static int
3019 3025 fcp_get_statec_count(struct fcp_ioctl *data, int mode, int *rval)
3020 3026 {
3021 3027 int ret;
3022 3028 uint32_t link_cnt;
3023 3029 struct fcp_ioctl fioctl;
3024 3030 struct fcp_port *pptr = NULL;
3025 3031
3026 3032 if ((ret = fcp_copyin_fcp_ioctl_data(data, mode, rval, &fioctl,
3027 3033 &pptr)) != 0) {
3028 3034 return (ret);
3029 3035 }
3030 3036
3031 3037 ASSERT(pptr != NULL);
3032 3038
3033 3039 if (fioctl.listlen != 1) {
3034 3040 return (EINVAL);
3035 3041 }
3036 3042
3037 3043 mutex_enter(&pptr->port_mutex);
3038 3044 if (pptr->port_state & FCP_STATE_OFFLINE) {
3039 3045 mutex_exit(&pptr->port_mutex);
3040 3046 return (ENXIO);
3041 3047 }
3042 3048
3043 3049 /*
3044 3050 * FCP_STATE_INIT is set in 2 cases (not sure why it is overloaded):
3045 3051	 * When fcp initially attaches to the port and there is nothing
3046 3052	 * hanging off the port, or if there was a repeat offline state change
3047 3053 * callback (refer fcp_statec_callback() FC_STATE_OFFLINE case).
3048 3054 * In the latter case, port_tmp_cnt will be non-zero and that is how we
3049 3055 * will differentiate the 2 cases.
3050 3056 */
3051 3057 if ((pptr->port_state & FCP_STATE_INIT) && pptr->port_tmp_cnt) {
3052 3058 mutex_exit(&pptr->port_mutex);
3053 3059 return (ENXIO);
3054 3060 }
3055 3061
3056 3062 link_cnt = pptr->port_link_cnt;
3057 3063 mutex_exit(&pptr->port_mutex);
3058 3064
3059 3065 if (ddi_copyout(&link_cnt, fioctl.list, (sizeof (uint32_t)), mode)) {
3060 3066 return (EFAULT);
3061 3067 }
3062 3068
3063 3069 #ifdef _MULTI_DATAMODEL
3064 3070 switch (ddi_model_convert_from(mode & FMODELS)) {
3065 3071 case DDI_MODEL_ILP32: {
3066 3072 struct fcp32_ioctl f32_ioctl;
3067 3073
3068 3074 f32_ioctl.fp_minor = fioctl.fp_minor;
3069 3075 f32_ioctl.listlen = fioctl.listlen;
3070 3076 f32_ioctl.list = (caddr32_t)(long)fioctl.list;
3071 3077 if (ddi_copyout((void *)&f32_ioctl, (void *)data,
3072 3078 sizeof (struct fcp32_ioctl), mode)) {
3073 3079 return (EFAULT);
3074 3080 }
3075 3081 break;
3076 3082 }
3077 3083 case DDI_MODEL_NONE:
3078 3084 if (ddi_copyout((void *)&fioctl, (void *)data,
3079 3085 sizeof (struct fcp_ioctl), mode)) {
3080 3086 return (EFAULT);
3081 3087 }
3082 3088 break;
3083 3089 }
3084 3090 #else /* _MULTI_DATAMODEL */
3085 3091
3086 3092 if (ddi_copyout((void *)&fioctl, (void *)data,
3087 3093 sizeof (struct fcp_ioctl), mode)) {
3088 3094 return (EFAULT);
3089 3095 }
3090 3096 #endif /* _MULTI_DATAMODEL */
3091 3097
3092 3098 return (0);
3093 3099 }
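/*
 * Userland sketch of the FCP_STATE_COUNT request documented above:
 * listlen must be 1 and "list" points at a uint32_t that receives the
 * current link/state-change counter.  "fd" is assumed to be open on the
 * fcp control node, and the include path is an assumption.
 */
#include <sys/types.h>
#include <sys/fibre-channel/ulp/fcp_util.h>	/* assumed ioctl definitions */
#include <stropts.h>
#include <unistd.h>

static int
fcp_state_count(int fd, int port_instance, uint32_t *cnt)
{
	struct fcp_ioctl fioctl;

	fioctl.fp_minor = port_instance;	/* fp port instance number */
	fioctl.listlen = 1;			/* required by the handler above */
	fioctl.list = (caddr_t)cnt;

	return (ioctl(fd, FCP_STATE_COUNT, &fioctl));
}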
3094 3100
3095 3101 /*
3096 3102 * This function copies the fcp_ioctl structure passed in from user land
3097 3103 * into kernel land. Handles 32 bit applications.
3098 3104 */
3099 3105 /*ARGSUSED*/
3100 3106 static int
3101 3107 fcp_copyin_fcp_ioctl_data(struct fcp_ioctl *data, int mode, int *rval,
3102 3108 struct fcp_ioctl *fioctl, struct fcp_port **pptr)
3103 3109 {
3104 3110 struct fcp_port *t_pptr;
3105 3111
3106 3112 #ifdef _MULTI_DATAMODEL
3107 3113 switch (ddi_model_convert_from(mode & FMODELS)) {
3108 3114 case DDI_MODEL_ILP32: {
3109 3115 struct fcp32_ioctl f32_ioctl;
3110 3116
3111 3117 if (ddi_copyin((void *)data, (void *)&f32_ioctl,
3112 3118 sizeof (struct fcp32_ioctl), mode)) {
3113 3119 return (EFAULT);
3114 3120 }
3115 3121 fioctl->fp_minor = f32_ioctl.fp_minor;
3116 3122 fioctl->listlen = f32_ioctl.listlen;
3117 3123 fioctl->list = (caddr_t)(long)f32_ioctl.list;
3118 3124 break;
3119 3125 }
3120 3126 case DDI_MODEL_NONE:
3121 3127 if (ddi_copyin((void *)data, (void *)fioctl,
3122 3128 sizeof (struct fcp_ioctl), mode)) {
3123 3129 return (EFAULT);
3124 3130 }
3125 3131 break;
3126 3132 }
3127 3133
3128 3134 #else /* _MULTI_DATAMODEL */
3129 3135 if (ddi_copyin((void *)data, (void *)fioctl,
3130 3136 sizeof (struct fcp_ioctl), mode)) {
3131 3137 return (EFAULT);
3132 3138 }
3133 3139 #endif /* _MULTI_DATAMODEL */
3134 3140
3135 3141 /*
3136 3142 * Right now we can assume that the minor number matches with
3137 3143 * this instance of fp. If this changes we will need to
3138 3144 * revisit this logic.
3139 3145 */
3140 3146 mutex_enter(&fcp_global_mutex);
3141 3147 t_pptr = fcp_port_head;
3142 3148 while (t_pptr) {
3143 3149 if (t_pptr->port_instance == (uint32_t)fioctl->fp_minor) {
3144 3150 break;
3145 3151 } else {
3146 3152 t_pptr = t_pptr->port_next;
3147 3153 }
3148 3154 }
3149 3155 *pptr = t_pptr;
3150 3156 mutex_exit(&fcp_global_mutex);
3151 3157 if (t_pptr == NULL) {
3152 3158 return (ENXIO);
3153 3159 }
3154 3160
3155 3161 return (0);
3156 3162 }
3157 3163
3158 3164 /*
3159 3165 * Function: fcp_port_create_tgt
3160 3166 *
3161 3167 * Description: As the name suggests, this function creates the target
3162 3168 *		context specified by the WWN provided by the caller. If the
3163 3169 *		creation goes well and the target is known by fp/fctl, a PLOGI
3164 3170 *		followed by a PRLI is issued.
3165 3171 *
3166 3172 * Argument: pptr fcp port structure
3167 3173 * pwwn WWN of the target
3168 3174 * ret_val Address of the return code. It could be:
3169 3175 * EIO, ENOMEM or 0.
3170 3176 * fc_status PLOGI or PRLI status completion
3171 3177 * fc_pkt_state PLOGI or PRLI state completion
3172 3178 * fc_pkt_reason PLOGI or PRLI reason completion
3173 3179 * fc_pkt_action PLOGI or PRLI action completion
3174 3180 *
3175 3181 * Return Value: NULL if it failed
3176 3182 * Target structure address if it succeeds
3177 3183 */
3178 3184 static struct fcp_tgt *
3179 3185 fcp_port_create_tgt(struct fcp_port *pptr, la_wwn_t *pwwn, int *ret_val,
3180 3186 int *fc_status, int *fc_pkt_state, int *fc_pkt_reason, int *fc_pkt_action)
3181 3187 {
3182 3188 struct fcp_tgt *ptgt = NULL;
3183 3189 fc_portmap_t devlist;
3184 3190 int lcount;
3185 3191 int error;
3186 3192
3187 3193 *ret_val = 0;
3188 3194
3189 3195 /*
3190 3196 * Check FC port device & get port map
3191 3197 */
3192 3198 if (fc_ulp_get_remote_port(pptr->port_fp_handle, pwwn,
3193 3199 &error, 1) == NULL) {
3194 3200 *ret_val = EIO;
3195 3201 } else {
3196 3202 if (fc_ulp_pwwn_to_portmap(pptr->port_fp_handle, pwwn,
3197 3203 &devlist) != FC_SUCCESS) {
3198 3204 *ret_val = EIO;
3199 3205 }
3200 3206 }
3201 3207
3202 3208 /* Set port map flags */
3203 3209 devlist.map_type = PORT_DEVICE_USER_CREATE;
3204 3210
3205 3211 /* Allocate target */
3206 3212 if (*ret_val == 0) {
3207 3213 lcount = pptr->port_link_cnt;
3208 3214 ptgt = fcp_alloc_tgt(pptr, &devlist, lcount);
3209 3215 if (ptgt == NULL) {
3210 3216 fcp_log(CE_WARN, pptr->port_dip,
3211 3217 "!FC target allocation failed");
3212 3218 *ret_val = ENOMEM;
3213 3219 } else {
3214 3220 /* Setup target */
3215 3221 mutex_enter(&ptgt->tgt_mutex);
3216 3222
3217 3223 ptgt->tgt_statec_cause = FCP_CAUSE_TGT_CHANGE;
3218 3224 ptgt->tgt_tmp_cnt = 1;
3219 3225 ptgt->tgt_d_id = devlist.map_did.port_id;
3220 3226 ptgt->tgt_hard_addr =
3221 3227 devlist.map_hard_addr.hard_addr;
3222 3228 ptgt->tgt_pd_handle = devlist.map_pd;
3223 3229 ptgt->tgt_fca_dev = NULL;
3224 3230
3225 3231 bcopy(&devlist.map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
3226 3232 FC_WWN_SIZE);
3227 3233 bcopy(&devlist.map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
3228 3234 FC_WWN_SIZE);
3229 3235
3230 3236 mutex_exit(&ptgt->tgt_mutex);
3231 3237 }
3232 3238 }
3233 3239
3234 3240 /* Release global mutex for PLOGI and PRLI */
3235 3241 mutex_exit(&fcp_global_mutex);
3236 3242
3237 3243 /* Send PLOGI (If necessary) */
3238 3244 if (*ret_val == 0) {
3239 3245 *ret_val = fcp_tgt_send_plogi(ptgt, fc_status,
3240 3246 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3241 3247 }
3242 3248
3243 3249 /* Send PRLI (If necessary) */
3244 3250 if (*ret_val == 0) {
3245 3251 *ret_val = fcp_tgt_send_prli(ptgt, fc_status,
3246 3252 fc_pkt_state, fc_pkt_reason, fc_pkt_action);
3247 3253 }
3248 3254
3249 3255 mutex_enter(&fcp_global_mutex);
3250 3256
3251 3257 return (ptgt);
3252 3258 }
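
fcp_port_create_tgt() is entered and exited with fcp_global_mutex held, but the mutex is dropped across the PLOGI/PRLI exchanges so that other threads can keep using the global port list while the ELS traffic is outstanding. The following is a minimal user-space sketch of that unlock-around-a-blocking-call pattern; it uses POSIX threads instead of the kernel mutex_enter()/mutex_exit() primitives, and the names global_lock and do_blocking_login() are hypothetical stand-ins, not part of the driver.

#include <pthread.h>
#include <unistd.h>
#include <stdio.h>

static pthread_mutex_t global_lock = PTHREAD_MUTEX_INITIALIZER;

/* Hypothetical stand-in for a blocking ELS exchange such as PLOGI. */
static int
do_blocking_login(void)
{
	sleep(1);		/* pretend to wait on the fabric */
	return (0);
}

/* Entered with global_lock held, like fcp_port_create_tgt(). */
static int
create_tgt_locked(void)
{
	int ret;

	/* Drop the lock so other threads may walk the shared list. */
	(void) pthread_mutex_unlock(&global_lock);

	ret = do_blocking_login();	/* blocking work, lock not held */

	/* Reacquire before returning to honor the caller's contract. */
	(void) pthread_mutex_lock(&global_lock);
	return (ret);
}

int
main(void)
{
	(void) pthread_mutex_lock(&global_lock);
	(void) printf("login returned %d\n", create_tgt_locked());
	(void) pthread_mutex_unlock(&global_lock);
	return (0);
}

The important property is that the function's locking contract (lock held on entry and on return) is preserved even though the lock is not held during the blocking call.
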
3253 3259
3254 3260 /*
3255 3261 * Function: fcp_tgt_send_plogi
3256 3262 *
3257 3263 * Description: This function sends a PLOGI to the target specified by the
3258 3264 * caller and waits till it completes.
3259 3265 *
3260 3266 * Argument: ptgt Target to send the plogi to.
3261 3267 * fc_status Status returned by fp/fctl in the PLOGI request.
3262 3268 * fc_pkt_state State returned by fp/fctl in the PLOGI request.
3263 3269 * fc_pkt_reason Reason returned by fp/fctl in the PLOGI request.
3264 3270 * fc_pkt_action Action returned by fp/fctl in the PLOGI request.
3265 3271 *
3266 3272 * Return Value: 0
3267 3273 * ENOMEM
3268 3274 * EIO
3269 3275 *
3270 3276 * Context: User context.
3271 3277 */
3272 3278 static int
3273 3279 fcp_tgt_send_plogi(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3274 3280 int *fc_pkt_reason, int *fc_pkt_action)
3275 3281 {
3276 3282 struct fcp_port *pptr;
3277 3283 struct fcp_ipkt *icmd;
3278 3284 struct fc_packet *fpkt;
3279 3285 fc_frame_hdr_t *hp;
3280 3286 struct la_els_logi logi;
3281 3287 int tcount;
3282 3288 int lcount;
3283 3289 int ret, login_retval = ~FC_SUCCESS;
3284 3290
3285 3291 ret = 0;
3286 3292
3287 3293 pptr = ptgt->tgt_port;
3288 3294
3289 3295 lcount = pptr->port_link_cnt;
3290 3296 tcount = ptgt->tgt_change_cnt;
3291 3297
3292 3298 /* Alloc internal packet */
3293 3299 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_logi_t),
3294 3300 sizeof (la_els_logi_t), 0,
3295 3301 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
3296 3302 lcount, tcount, 0, FC_INVALID_RSCN_COUNT);
3297 3303
3298 3304 if (icmd == NULL) {
3299 3305 ret = ENOMEM;
3300 3306 } else {
3301 3307 /*
3302 3308 * Setup internal packet as sema sync
3303 3309 */
3304 3310 fcp_ipkt_sema_init(icmd);
3305 3311
3306 3312 /*
3307 3313 * Setup internal packet (icmd)
3308 3314 */
3309 3315 icmd->ipkt_lun = NULL;
3310 3316 icmd->ipkt_restart = 0;
3311 3317 icmd->ipkt_retries = 0;
3312 3318 icmd->ipkt_opcode = LA_ELS_PLOGI;
3313 3319
3314 3320 /*
3315 3321 * Setup fc_packet
3316 3322 */
3317 3323 fpkt = icmd->ipkt_fpkt;
3318 3324
3319 3325 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
3320 3326 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
3321 3327 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
3322 3328
3323 3329 /*
3324 3330 * Setup FC frame header
3325 3331 */
3326 3332 hp = &fpkt->pkt_cmd_fhdr;
3327 3333
3328 3334 hp->s_id = pptr->port_id; /* source ID */
3329 3335 hp->d_id = ptgt->tgt_d_id; /* dest ID */
3330 3336 hp->r_ctl = R_CTL_ELS_REQ;
3331 3337 hp->type = FC_TYPE_EXTENDED_LS;
3332 3338 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
3333 3339 hp->seq_id = 0;
3334 3340 hp->rsvd = 0;
3335 3341 hp->df_ctl = 0;
3336 3342 hp->seq_cnt = 0;
3337 3343 hp->ox_id = 0xffff; /* i.e. none */
3338 3344 hp->rx_id = 0xffff; /* i.e. none */
3339 3345 hp->ro = 0;
3340 3346
3341 3347 /*
3342 3348 * Setup PLOGI
3343 3349 */
3344 3350 bzero(&logi, sizeof (struct la_els_logi));
3345 3351 logi.ls_code.ls_code = LA_ELS_PLOGI;
3346 3352
3347 3353 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
3348 3354 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
3349 3355
3350 3356 /*
3351 3357 * Send PLOGI
3352 3358 */
3353 3359 *fc_status = login_retval =
3354 3360 fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
3355 3361 if (*fc_status != FC_SUCCESS) {
3356 3362 ret = EIO;
3357 3363 }
3358 3364 }
3359 3365
3360 3366 /*
3361 3367 * Wait for completion
3362 3368 */
3363 3369 if ((ret == 0) && (login_retval == FC_SUCCESS)) {
3364 3370 ret = fcp_ipkt_sema_wait(icmd);
3365 3371
3366 3372 *fc_pkt_state = fpkt->pkt_state;
3367 3373 *fc_pkt_reason = fpkt->pkt_reason;
3368 3374 *fc_pkt_action = fpkt->pkt_action;
3369 3375 }
3370 3376
3371 3377 /*
3372 3378 * Cleanup transport data structures if icmd was alloc-ed AND if there
3373 3379 	 * is going to be no callback (i.e. if fc_ulp_login() failed).
3374 3380 * Otherwise, cleanup happens in callback routine.
3375 3381 */
3376 3382 if (icmd != NULL) {
3377 3383 fcp_ipkt_sema_cleanup(icmd);
3378 3384 }
3379 3385
3380 3386 return (ret);
3381 3387 }
3382 3388
3383 3389 /*
3384 3390 * Function: fcp_tgt_send_prli
3385 3391 *
3386 3392 * Description: Does nothing as of today.
3387 3393 *
3388 3394 * Argument: ptgt Target to send the prli to.
3389 3395 * fc_status Status returned by fp/fctl in the PRLI request.
3390 3396 * fc_pkt_state State returned by fp/fctl in the PRLI request.
3391 3397 * fc_pkt_reason Reason returned by fp/fctl in the PRLI request.
3392 3398 * fc_pkt_action Action returned by fp/fctl in the PRLI request.
3393 3399 *
3394 3400 * Return Value: 0
3395 3401 */
3396 3402 /*ARGSUSED*/
3397 3403 static int
3398 3404 fcp_tgt_send_prli(struct fcp_tgt *ptgt, int *fc_status, int *fc_pkt_state,
3399 3405 int *fc_pkt_reason, int *fc_pkt_action)
3400 3406 {
3401 3407 return (0);
3402 3408 }
3403 3409
3404 3410 /*
3405 3411 * Function: fcp_ipkt_sema_init
3406 3412 *
3407 3413 * Description: Initializes the semaphore contained in the internal packet.
3408 3414 *
3409 3415 * Argument: icmd Internal packet the semaphore of which must be
3410 3416 * initialized.
3411 3417 *
3412 3418 * Return Value: None
3413 3419 *
3414 3420 * Context: User context only.
3415 3421 */
3416 3422 static void
3417 3423 fcp_ipkt_sema_init(struct fcp_ipkt *icmd)
3418 3424 {
3419 3425 struct fc_packet *fpkt;
3420 3426
3421 3427 fpkt = icmd->ipkt_fpkt;
3422 3428
3423 3429 /* Create semaphore for sync */
3424 3430 sema_init(&(icmd->ipkt_sema), 0, NULL, SEMA_DRIVER, NULL);
3425 3431
3426 3432 /* Setup the completion callback */
3427 3433 fpkt->pkt_comp = fcp_ipkt_sema_callback;
3428 3434 }
3429 3435
3430 3436 /*
3431 3437 * Function: fcp_ipkt_sema_wait
3432 3438 *
3433 3439 * Description: Wait on the semaphore embedded in the internal packet. The
3434 3440 * semaphore is released in the callback.
3435 3441 *
3436 3442 * Argument: icmd Internal packet to wait on for completion.
3437 3443 *
3438 3444 * Return Value: 0
3439 3445 * EIO
3440 3446 * EBUSY
3441 3447 * EAGAIN
3442 3448 *
3443 3449 * Context: User context only.
3444 3450 *
3445 3451 * This function does a conversion between the field pkt_state of the fc_packet
3446 3452 * embedded in the internal packet (icmd) and the code it returns.
3447 3453 */
3448 3454 static int
3449 3455 fcp_ipkt_sema_wait(struct fcp_ipkt *icmd)
3450 3456 {
3451 3457 struct fc_packet *fpkt;
3452 3458 int ret;
3453 3459
3454 3460 ret = EIO;
3455 3461 fpkt = icmd->ipkt_fpkt;
3456 3462
3457 3463 /*
3458 3464 * Wait on semaphore
3459 3465 */
3460 3466 sema_p(&(icmd->ipkt_sema));
3461 3467
3462 3468 /*
3463 3469 * Check the status of the FC packet
3464 3470 */
3465 3471 switch (fpkt->pkt_state) {
3466 3472 case FC_PKT_SUCCESS:
3467 3473 ret = 0;
3468 3474 break;
3469 3475 case FC_PKT_LOCAL_RJT:
3470 3476 switch (fpkt->pkt_reason) {
3471 3477 case FC_REASON_SEQ_TIMEOUT:
3472 3478 case FC_REASON_RX_BUF_TIMEOUT:
3473 3479 ret = EAGAIN;
3474 3480 break;
3475 3481 case FC_REASON_PKT_BUSY:
3476 3482 ret = EBUSY;
3477 3483 break;
3478 3484 }
3479 3485 break;
3480 3486 case FC_PKT_TIMEOUT:
3481 3487 ret = EAGAIN;
3482 3488 break;
3483 3489 case FC_PKT_LOCAL_BSY:
3484 3490 case FC_PKT_TRAN_BSY:
3485 3491 case FC_PKT_NPORT_BSY:
3486 3492 case FC_PKT_FABRIC_BSY:
3487 3493 ret = EBUSY;
3488 3494 break;
3489 3495 case FC_PKT_LS_RJT:
3490 3496 case FC_PKT_BA_RJT:
3491 3497 switch (fpkt->pkt_reason) {
3492 3498 case FC_REASON_LOGICAL_BSY:
3493 3499 ret = EBUSY;
3494 3500 break;
3495 3501 }
3496 3502 break;
3497 3503 case FC_PKT_FS_RJT:
3498 3504 switch (fpkt->pkt_reason) {
3499 3505 case FC_REASON_FS_LOGICAL_BUSY:
3500 3506 ret = EBUSY;
3501 3507 break;
3502 3508 }
3503 3509 break;
3504 3510 }
3505 3511
3506 3512 return (ret);
3507 3513 }
3508 3514
3509 3515 /*
3510 3516 * Function: fcp_ipkt_sema_callback
3511 3517 *
3512 3518 * Description: Registered as the completion callback function for the FC
3513 3519 * transport when the ipkt semaphore is used for sync. This will
3514 3520 * cleanup the used data structures, if necessary and wake up
3515 3521 * the user thread to complete the transaction.
3516 3522 *
3517 3523 * Argument: fpkt FC packet (points to the icmd)
3518 3524 *
3519 3525 * Return Value: None
3520 3526 *
3521 3527 * Context: User context only
3522 3528 */
3523 3529 static void
3524 3530 fcp_ipkt_sema_callback(struct fc_packet *fpkt)
3525 3531 {
3526 3532 struct fcp_ipkt *icmd;
3527 3533
3528 3534 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
3529 3535
3530 3536 /*
3531 3537 * Wake up user thread
3532 3538 */
3533 3539 sema_v(&(icmd->ipkt_sema));
3534 3540 }
3535 3541
3536 3542 /*
3537 3543 * Function: fcp_ipkt_sema_cleanup
3538 3544 *
3539 3545 * Description: Called to cleanup (if necessary) the data structures used
3540 3546 * when ipkt sema is used for sync. This function will detect
3541 3547 * whether the caller is the last thread (via counter) and
3542 3548 * cleanup only if necessary.
3543 3549 *
3544 3550 * Argument: icmd Internal command packet
3545 3551 *
3546 3552 * Return Value: None
3547 3553 *
3548 3554 * Context: User context only
3549 3555 */
3550 3556 static void
3551 3557 fcp_ipkt_sema_cleanup(struct fcp_ipkt *icmd)
3552 3558 {
3553 3559 struct fcp_tgt *ptgt;
3554 3560 struct fcp_port *pptr;
3555 3561
3556 3562 ptgt = icmd->ipkt_tgt;
3557 3563 pptr = icmd->ipkt_port;
3558 3564
3559 3565 /*
3560 3566 * Acquire data structure
3561 3567 */
3562 3568 mutex_enter(&ptgt->tgt_mutex);
3563 3569
3564 3570 /*
3565 3571 * Destroy semaphore
3566 3572 */
3567 3573 sema_destroy(&(icmd->ipkt_sema));
3568 3574
3569 3575 /*
3570 3576 * Cleanup internal packet
3571 3577 */
3572 3578 mutex_exit(&ptgt->tgt_mutex);
3573 3579 fcp_icmd_free(pptr, icmd);
3574 3580 }
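
Taken together, fcp_ipkt_sema_init(), fcp_ipkt_sema_wait(), fcp_ipkt_sema_callback() and fcp_ipkt_sema_cleanup() turn the asynchronous transport completion into a synchronous call: the semaphore starts at zero, the issuing thread blocks in sema_p(), the completion callback posts it with sema_v(), and the semaphore is destroyed once the result has been consumed. Below is a user-space sketch of the same pattern built on POSIX semaphores; the request structure and complete_request() thread are hypothetical stand-ins for the fc_packet and its pkt_comp callback.

#include <semaphore.h>
#include <pthread.h>
#include <stdio.h>

struct request {
	sem_t	done;		/* counterpart of ipkt_sema, starts at 0 */
	int	result;		/* counterpart of pkt_state */
};

/* Runs in the "transport" context, like fcp_ipkt_sema_callback(). */
static void *
complete_request(void *arg)
{
	struct request *req = arg;

	req->result = 0;		/* pretend the exchange succeeded */
	(void) sem_post(&req->done);	/* wake the waiting thread */
	return (NULL);
}

int
main(void)
{
	struct request req;
	pthread_t tid;

	(void) sem_init(&req.done, 0, 0);	/* fcp_ipkt_sema_init() */
	(void) pthread_create(&tid, NULL, complete_request, &req);
	(void) sem_wait(&req.done);		/* fcp_ipkt_sema_wait() */
	(void) pthread_join(tid, NULL);
	(void) sem_destroy(&req.done);		/* fcp_ipkt_sema_cleanup() */
	(void) printf("request completed with %d\n", req.result);
	return (0);
}
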
3575 3581
3576 3582 /*
3577 3583 * Function: fcp_port_attach
3578 3584 *
3579 3585 * Description: Called by the transport framework to resume, suspend or
3580 3586 * attach a new port.
3581 3587 *
3582 3588 * Argument: ulph Port handle
3583 3589 * *pinfo Port information
3584 3590 * cmd Command
3585 3591 * s_id Port ID
3586 3592 *
3587 3593 * Return Value: FC_FAILURE or FC_SUCCESS
3588 3594 */
3589 3595 /*ARGSUSED*/
3590 3596 static int
3591 3597 fcp_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
3592 3598 fc_attach_cmd_t cmd, uint32_t s_id)
3593 3599 {
3594 3600 int instance;
3595 3601 int res = FC_FAILURE; /* default result */
3596 3602
3597 3603 ASSERT(pinfo != NULL);
3598 3604
3599 3605 instance = ddi_get_instance(pinfo->port_dip);
3600 3606
3601 3607 switch (cmd) {
3602 3608 case FC_CMD_ATTACH:
3603 3609 /*
3604 3610 * this port instance attaching for the first time (or after
3605 3611 * being detached before)
3606 3612 */
3607 3613 if (fcp_handle_port_attach(ulph, pinfo, s_id,
3608 3614 instance) == DDI_SUCCESS) {
3609 3615 res = FC_SUCCESS;
3610 3616 } else {
3611 3617 ASSERT(ddi_get_soft_state(fcp_softstate,
3612 3618 instance) == NULL);
3613 3619 }
3614 3620 break;
3615 3621
3616 3622 case FC_CMD_RESUME:
3617 3623 case FC_CMD_POWER_UP:
3618 3624 /*
3619 3625 		 * this port instance was attached and then suspended and
3620 3626 * will now be resumed
3621 3627 */
3622 3628 if (fcp_handle_port_resume(ulph, pinfo, s_id, cmd,
3623 3629 instance) == DDI_SUCCESS) {
3624 3630 res = FC_SUCCESS;
3625 3631 }
3626 3632 break;
3627 3633
3628 3634 default:
3629 3635 /* shouldn't happen */
3630 3636 FCP_TRACE(fcp_logq, "fcp",
3631 3637 fcp_trace, FCP_BUF_LEVEL_2, 0,
3632 3638 		    "port_attach: unknown command: %d", cmd);
3633 3639 break;
3634 3640 }
3635 3641
3636 3642 /* return result */
3637 3643 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3638 3644 FCP_BUF_LEVEL_1, 0, "fcp_port_attach returning %d", res);
3639 3645
3640 3646 return (res);
3641 3647 }
3642 3648
3643 3649
3644 3650 /*
3645 3651 * detach or suspend this port instance
3646 3652 *
3647 3653 * acquires and releases the global mutex
3648 3654 *
3649 3655 * acquires and releases the mutex for this port
3650 3656 *
3651 3657 * acquires and releases the hotplug mutex for this port
3652 3658 */
3653 3659 /*ARGSUSED*/
3654 3660 static int
3655 3661 fcp_port_detach(opaque_t ulph, fc_ulp_port_info_t *info,
3656 3662 fc_detach_cmd_t cmd)
3657 3663 {
3658 3664 int flag;
3659 3665 int instance;
3660 3666 struct fcp_port *pptr;
3661 3667
3662 3668 instance = ddi_get_instance(info->port_dip);
3663 3669 pptr = ddi_get_soft_state(fcp_softstate, instance);
3664 3670
3665 3671 switch (cmd) {
3666 3672 case FC_CMD_SUSPEND:
3667 3673 FCP_DTRACE(fcp_logq, "fcp",
3668 3674 fcp_trace, FCP_BUF_LEVEL_8, 0,
3669 3675 "port suspend called for port %d", instance);
3670 3676 flag = FCP_STATE_SUSPENDED;
3671 3677 break;
3672 3678
3673 3679 case FC_CMD_POWER_DOWN:
3674 3680 FCP_DTRACE(fcp_logq, "fcp",
3675 3681 fcp_trace, FCP_BUF_LEVEL_8, 0,
3676 3682 "port power down called for port %d", instance);
3677 3683 flag = FCP_STATE_POWER_DOWN;
3678 3684 break;
3679 3685
3680 3686 case FC_CMD_DETACH:
3681 3687 FCP_DTRACE(fcp_logq, "fcp",
3682 3688 fcp_trace, FCP_BUF_LEVEL_8, 0,
3683 3689 "port detach called for port %d", instance);
3684 3690 flag = FCP_STATE_DETACHING;
3685 3691 break;
3686 3692
3687 3693 default:
3688 3694 /* shouldn't happen */
3689 3695 return (FC_FAILURE);
3690 3696 }
3691 3697 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
3692 3698 FCP_BUF_LEVEL_1, 0, "fcp_port_detach returning");
3693 3699
3694 3700 return (fcp_handle_port_detach(pptr, flag, instance));
3695 3701 }
3696 3702
3697 3703
3698 3704 /*
3699 3705 * called for ioctls on the transport's devctl interface, and the transport
3700 3706 * has passed it to us
3701 3707 *
3702 3708 * this will only be called for device control ioctls (i.e. hotplugging stuff)
3703 3709 *
3704 3710 * return FC_SUCCESS if we decide to claim the ioctl,
3705 3711 * else return FC_UNCLAIMED
3706 3712 *
3707 3713 * *rval is set iff we decide to claim the ioctl
3708 3714 */
3709 3715 /*ARGSUSED*/
3710 3716 static int
3711 3717 fcp_port_ioctl(opaque_t ulph, opaque_t port_handle, dev_t dev, int cmd,
3712 3718 intptr_t data, int mode, cred_t *credp, int *rval, uint32_t claimed)
3713 3719 {
3714 3720 int retval = FC_UNCLAIMED; /* return value */
3715 3721 struct fcp_port *pptr = NULL; /* our soft state */
3716 3722 struct devctl_iocdata *dcp = NULL; /* for devctl */
3717 3723 dev_info_t *cdip;
3718 3724 mdi_pathinfo_t *pip = NULL;
3719 3725 char *ndi_nm; /* NDI name */
3720 3726 char *ndi_addr; /* NDI addr */
3721 3727 int is_mpxio, circ;
3722 3728 int devi_entered = 0;
3723 3729 clock_t end_time;
3724 3730
3725 3731 ASSERT(rval != NULL);
3726 3732
3727 3733 FCP_DTRACE(fcp_logq, "fcp",
3728 3734 fcp_trace, FCP_BUF_LEVEL_8, 0,
3729 3735 "fcp_port_ioctl(cmd=0x%x, claimed=%d)", cmd, claimed);
3730 3736
3731 3737 /* if already claimed then forget it */
3732 3738 if (claimed) {
3733 3739 /*
3734 3740 * for now, if this ioctl has already been claimed, then
3735 3741 * we just ignore it
3736 3742 */
3737 3743 return (retval);
3738 3744 }
3739 3745
3740 3746 /* get our port info */
3741 3747 if ((pptr = fcp_get_port(port_handle)) == NULL) {
3742 3748 fcp_log(CE_WARN, NULL,
3743 3749 		    "!fcp:Invalid port handle in ioctl");
3744 3750 *rval = ENXIO;
3745 3751 return (retval);
3746 3752 }
3747 3753 is_mpxio = pptr->port_mpxio;
3748 3754
3749 3755 switch (cmd) {
3750 3756 case DEVCTL_BUS_GETSTATE:
3751 3757 case DEVCTL_BUS_QUIESCE:
3752 3758 case DEVCTL_BUS_UNQUIESCE:
3753 3759 case DEVCTL_BUS_RESET:
3754 3760 case DEVCTL_BUS_RESETALL:
3755 3761
3756 3762 case DEVCTL_BUS_DEV_CREATE:
3757 3763 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3758 3764 return (retval);
3759 3765 }
3760 3766 break;
3761 3767
3762 3768 case DEVCTL_DEVICE_GETSTATE:
3763 3769 case DEVCTL_DEVICE_OFFLINE:
3764 3770 case DEVCTL_DEVICE_ONLINE:
3765 3771 case DEVCTL_DEVICE_REMOVE:
3766 3772 case DEVCTL_DEVICE_RESET:
3767 3773 if (ndi_dc_allochdl((void *)data, &dcp) != NDI_SUCCESS) {
3768 3774 return (retval);
3769 3775 }
3770 3776
3771 3777 ASSERT(dcp != NULL);
3772 3778
3773 3779 /* ensure we have a name and address */
3774 3780 if (((ndi_nm = ndi_dc_getname(dcp)) == NULL) ||
3775 3781 ((ndi_addr = ndi_dc_getaddr(dcp)) == NULL)) {
3776 3782 FCP_TRACE(fcp_logq, pptr->port_instbuf,
3777 3783 fcp_trace, FCP_BUF_LEVEL_2, 0,
3778 3784 "ioctl: can't get name (%s) or addr (%s)",
3779 3785 ndi_nm ? ndi_nm : "<null ptr>",
3780 3786 ndi_addr ? ndi_addr : "<null ptr>");
3781 3787 ndi_dc_freehdl(dcp);
3782 3788 return (retval);
3783 3789 }
3784 3790
3785 3791
3786 3792 /* get our child's DIP */
3787 3793 ASSERT(pptr != NULL);
3788 3794 if (is_mpxio) {
3789 3795 mdi_devi_enter(pptr->port_dip, &circ);
3790 3796 } else {
3791 3797 ndi_devi_enter(pptr->port_dip, &circ);
3792 3798 }
3793 3799 devi_entered = 1;
3794 3800
3795 3801 if ((cdip = ndi_devi_find(pptr->port_dip, ndi_nm,
3796 3802 ndi_addr)) == NULL) {
3797 3803 /* Look for virtually enumerated devices. */
3798 3804 pip = mdi_pi_find(pptr->port_dip, NULL, ndi_addr);
3799 3805 if (pip == NULL ||
3800 3806 ((cdip = mdi_pi_get_client(pip)) == NULL)) {
3801 3807 *rval = ENXIO;
3802 3808 goto out;
3803 3809 }
3804 3810 }
3805 3811 break;
3806 3812
3807 3813 default:
3808 3814 *rval = ENOTTY;
3809 3815 return (retval);
3810 3816 }
3811 3817
3812 3818 /* this ioctl is ours -- process it */
3813 3819
3814 3820 retval = FC_SUCCESS; /* just means we claim the ioctl */
3815 3821
3816 3822 /* we assume it will be a success; else we'll set error value */
3817 3823 *rval = 0;
3818 3824
3819 3825
3820 3826 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
3821 3827 fcp_trace, FCP_BUF_LEVEL_8, 0,
3822 3828 "ioctl: claiming this one");
3823 3829
3824 3830 /* handle ioctls now */
3825 3831 switch (cmd) {
3826 3832 case DEVCTL_DEVICE_GETSTATE:
3827 3833 ASSERT(cdip != NULL);
3828 3834 ASSERT(dcp != NULL);
3829 3835 if (ndi_dc_return_dev_state(cdip, dcp) != NDI_SUCCESS) {
3830 3836 *rval = EFAULT;
3831 3837 }
3832 3838 break;
3833 3839
3834 3840 case DEVCTL_DEVICE_REMOVE:
3835 3841 case DEVCTL_DEVICE_OFFLINE: {
3836 3842 int flag = 0;
3837 3843 int lcount;
3838 3844 int tcount;
3839 3845 struct fcp_pkt *head = NULL;
3840 3846 struct fcp_lun *plun;
3841 3847 child_info_t *cip = CIP(cdip);
3842 3848 int all = 1;
3843 3849 struct fcp_lun *tplun;
3844 3850 struct fcp_tgt *ptgt;
3845 3851
3846 3852 ASSERT(pptr != NULL);
3847 3853 ASSERT(cdip != NULL);
3848 3854
3849 3855 mutex_enter(&pptr->port_mutex);
3850 3856 if (pip != NULL) {
3851 3857 cip = CIP(pip);
3852 3858 }
3853 3859 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3854 3860 mutex_exit(&pptr->port_mutex);
3855 3861 *rval = ENXIO;
3856 3862 break;
3857 3863 }
3858 3864
3859 3865 head = fcp_scan_commands(plun);
3860 3866 if (head != NULL) {
3861 3867 fcp_abort_commands(head, LUN_PORT);
3862 3868 }
3863 3869 lcount = pptr->port_link_cnt;
3864 3870 tcount = plun->lun_tgt->tgt_change_cnt;
3865 3871 mutex_exit(&pptr->port_mutex);
3866 3872
3867 3873 if (cmd == DEVCTL_DEVICE_REMOVE) {
3868 3874 flag = NDI_DEVI_REMOVE;
3875 + if (is_mpxio)
3876 + flag |= NDI_USER_REQ;
3869 3877 }
3870 3878
3871 3879 if (is_mpxio) {
3872 3880 mdi_devi_exit(pptr->port_dip, circ);
3873 3881 } else {
3874 3882 ndi_devi_exit(pptr->port_dip, circ);
3875 3883 }
3876 3884 devi_entered = 0;
3877 3885
3878 3886 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3879 3887 FCP_OFFLINE, lcount, tcount, flag);
3880 3888
3881 3889 if (*rval != NDI_SUCCESS) {
3882 3890 *rval = (*rval == NDI_BUSY) ? EBUSY : EIO;
3883 3891 break;
3884 3892 }
3885 3893
3886 3894 fcp_update_offline_flags(plun);
3887 3895
3888 3896 ptgt = plun->lun_tgt;
3889 3897 mutex_enter(&ptgt->tgt_mutex);
3890 3898 for (tplun = ptgt->tgt_lun; tplun != NULL; tplun =
3891 3899 tplun->lun_next) {
3892 3900 mutex_enter(&tplun->lun_mutex);
3893 3901 if (!(tplun->lun_state & FCP_LUN_OFFLINE)) {
3894 3902 all = 0;
3895 3903 }
3896 3904 mutex_exit(&tplun->lun_mutex);
3897 3905 }
3898 3906
3899 3907 if (all) {
3900 3908 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
3901 3909 /*
3902 3910 * The user is unconfiguring/offlining the device.
3903 3911 			 * If the topology is fabric and auto configuration
3904 3912 			 * is enabled, make sure the user is the only one who
3905 3913 * can reconfigure the device.
3906 3914 */
3907 3915 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
3908 3916 fcp_enable_auto_configuration) {
3909 3917 ptgt->tgt_manual_config_only = 1;
3910 3918 }
3911 3919 }
3912 3920 mutex_exit(&ptgt->tgt_mutex);
3913 3921 break;
3914 3922 }
3915 3923
3916 3924 case DEVCTL_DEVICE_ONLINE: {
3917 3925 int lcount;
3918 3926 int tcount;
3919 3927 struct fcp_lun *plun;
3920 3928 child_info_t *cip = CIP(cdip);
3921 3929
3922 3930 ASSERT(cdip != NULL);
3923 3931 ASSERT(pptr != NULL);
3924 3932
3925 3933 mutex_enter(&pptr->port_mutex);
3926 3934 if (pip != NULL) {
3927 3935 cip = CIP(pip);
3928 3936 }
3929 3937 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
3930 3938 mutex_exit(&pptr->port_mutex);
3931 3939 *rval = ENXIO;
3932 3940 break;
3933 3941 }
3934 3942 lcount = pptr->port_link_cnt;
3935 3943 tcount = plun->lun_tgt->tgt_change_cnt;
3936 3944 mutex_exit(&pptr->port_mutex);
3937 3945
3938 3946 /*
3939 3947 * The FCP_LUN_ONLINING flag is used in fcp_scsi_start()
3940 3948 * to allow the device attach to occur when the device is
3941 3949 * FCP_LUN_OFFLINE (so we don't reject the INQUIRY command
3942 3950 * from the scsi_probe()).
3943 3951 */
3944 3952 mutex_enter(&LUN_TGT->tgt_mutex);
3945 3953 plun->lun_state |= FCP_LUN_ONLINING;
3946 3954 mutex_exit(&LUN_TGT->tgt_mutex);
3947 3955
3948 3956 if (is_mpxio) {
3949 3957 mdi_devi_exit(pptr->port_dip, circ);
3950 3958 } else {
3951 3959 ndi_devi_exit(pptr->port_dip, circ);
3952 3960 }
3953 3961 devi_entered = 0;
3954 3962
3955 3963 *rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
3956 3964 FCP_ONLINE, lcount, tcount, 0);
3957 3965
3958 3966 if (*rval != NDI_SUCCESS) {
3959 3967 /* Reset the FCP_LUN_ONLINING bit */
3960 3968 mutex_enter(&LUN_TGT->tgt_mutex);
3961 3969 plun->lun_state &= ~FCP_LUN_ONLINING;
3962 3970 mutex_exit(&LUN_TGT->tgt_mutex);
3963 3971 *rval = EIO;
3964 3972 break;
3965 3973 }
3966 3974 mutex_enter(&LUN_TGT->tgt_mutex);
3967 3975 plun->lun_state &= ~(FCP_LUN_OFFLINE | FCP_LUN_BUSY |
3968 3976 FCP_LUN_ONLINING);
3969 3977 mutex_exit(&LUN_TGT->tgt_mutex);
3970 3978 break;
3971 3979 }
3972 3980
3973 3981 case DEVCTL_BUS_DEV_CREATE: {
3974 3982 uchar_t *bytes = NULL;
3975 3983 uint_t nbytes;
3976 3984 struct fcp_tgt *ptgt = NULL;
3977 3985 struct fcp_lun *plun = NULL;
3978 3986 dev_info_t *useless_dip = NULL;
3979 3987
3980 3988 *rval = ndi_dc_devi_create(dcp, pptr->port_dip,
3981 3989 DEVCTL_CONSTRUCT, &useless_dip);
3982 3990 if (*rval != 0 || useless_dip == NULL) {
3983 3991 break;
3984 3992 }
3985 3993
3986 3994 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, useless_dip,
3987 3995 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
3988 3996 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
3989 3997 *rval = EINVAL;
3990 3998 (void) ndi_devi_free(useless_dip);
3991 3999 if (bytes != NULL) {
3992 4000 ddi_prop_free(bytes);
3993 4001 }
3994 4002 break;
3995 4003 }
3996 4004
3997 4005 *rval = fcp_create_on_demand(pptr, bytes);
3998 4006 if (*rval == 0) {
3999 4007 mutex_enter(&pptr->port_mutex);
4000 4008 ptgt = fcp_lookup_target(pptr, (uchar_t *)bytes);
4001 4009 if (ptgt) {
4002 4010 /*
4003 4011 * We now have a pointer to the target that
4004 4012 * was created. Lets point to the first LUN on
4005 4013 * this new target.
4006 4014 */
4007 4015 mutex_enter(&ptgt->tgt_mutex);
4008 4016
4009 4017 plun = ptgt->tgt_lun;
4010 4018 /*
4011 4019 * There may be stale/offline LUN entries on
4012 4020 * this list (this is by design) and so we have
4013 4021 * to make sure we point to the first online
4014 4022 * LUN
4015 4023 */
4016 4024 while (plun &&
4017 4025 plun->lun_state & FCP_LUN_OFFLINE) {
4018 4026 plun = plun->lun_next;
4019 4027 }
4020 4028
4021 4029 mutex_exit(&ptgt->tgt_mutex);
4022 4030 }
4023 4031 mutex_exit(&pptr->port_mutex);
4024 4032 }
4025 4033
4026 4034 if (*rval == 0 && ptgt && plun) {
4027 4035 mutex_enter(&plun->lun_mutex);
4028 4036 /*
4029 4037 * Allow up to fcp_lun_ready_retry seconds to
4030 4038 * configure all the luns behind the target.
4031 4039 *
4032 4040 * The intent here is to allow targets with long
4033 4041 * reboot/reset-recovery times to become available
4034 4042 * while limiting the maximum wait time for an
4035 4043 * unresponsive target.
4036 4044 */
4037 4045 end_time = ddi_get_lbolt() +
4038 4046 SEC_TO_TICK(fcp_lun_ready_retry);
4039 4047
4040 4048 while (ddi_get_lbolt() < end_time) {
4041 4049 retval = FC_SUCCESS;
4042 4050
4043 4051 /*
4044 4052 * The new ndi interfaces for on-demand creation
4045 4053 				 * are inflexible. Do some more work to pass on
4046 4054 				 * a path name of some LUN (design is broken!)
4047 4055 */
4048 4056 if (plun->lun_cip) {
4049 4057 if (plun->lun_mpxio == 0) {
4050 4058 cdip = DIP(plun->lun_cip);
4051 4059 } else {
4052 4060 cdip = mdi_pi_get_client(
4053 4061 PIP(plun->lun_cip));
4054 4062 }
4055 4063 if (cdip == NULL) {
4056 4064 *rval = ENXIO;
4057 4065 break;
4058 4066 }
4059 4067
4060 4068 if (!i_ddi_devi_attached(cdip)) {
4061 4069 mutex_exit(&plun->lun_mutex);
4062 4070 delay(drv_usectohz(1000000));
4063 4071 mutex_enter(&plun->lun_mutex);
4064 4072 } else {
4065 4073 /*
4066 4074 					 * This LUN is ready, let's
4067 4075 * check the next one.
4068 4076 */
4069 4077 mutex_exit(&plun->lun_mutex);
4070 4078 plun = plun->lun_next;
4071 4079 while (plun && (plun->lun_state
4072 4080 & FCP_LUN_OFFLINE)) {
4073 4081 plun = plun->lun_next;
4074 4082 }
4075 4083 if (!plun) {
4076 4084 break;
4077 4085 }
4078 4086 mutex_enter(&plun->lun_mutex);
4079 4087 }
4080 4088 } else {
4081 4089 /*
4082 4090 * lun_cip field for a valid lun
4083 4091 * should never be NULL. Fail the
4084 4092 * command.
4085 4093 */
4086 4094 *rval = ENXIO;
4087 4095 break;
4088 4096 }
4089 4097 }
4090 4098 if (plun) {
4091 4099 mutex_exit(&plun->lun_mutex);
4092 4100 } else {
4093 4101 char devnm[MAXNAMELEN];
4094 4102 int nmlen;
4095 4103
4096 4104 nmlen = snprintf(devnm, MAXNAMELEN, "%s@%s",
4097 4105 ddi_node_name(cdip),
4098 4106 ddi_get_name_addr(cdip));
4099 4107
4100 4108 if (copyout(&devnm, dcp->cpyout_buf, nmlen) !=
4101 4109 0) {
4102 4110 *rval = EFAULT;
4103 4111 }
4104 4112 }
4105 4113 } else {
4106 4114 int i;
4107 4115 char buf[25];
4108 4116
4109 4117 for (i = 0; i < FC_WWN_SIZE; i++) {
4110 4118 (void) sprintf(&buf[i << 1], "%02x", bytes[i]);
4111 4119 }
4112 4120
4113 4121 fcp_log(CE_WARN, pptr->port_dip,
4114 4122 "!Failed to create nodes for pwwn=%s; error=%x",
4115 4123 buf, *rval);
4116 4124 }
4117 4125
4118 4126 (void) ndi_devi_free(useless_dip);
4119 4127 ddi_prop_free(bytes);
4120 4128 break;
4121 4129 }
4122 4130
4123 4131 case DEVCTL_DEVICE_RESET: {
4124 4132 struct fcp_lun *plun;
4125 4133 child_info_t *cip = CIP(cdip);
4126 4134
4127 4135 ASSERT(cdip != NULL);
4128 4136 ASSERT(pptr != NULL);
4129 4137 mutex_enter(&pptr->port_mutex);
4130 4138 if (pip != NULL) {
4131 4139 cip = CIP(pip);
4132 4140 }
4133 4141 if ((plun = fcp_get_lun_from_cip(pptr, cip)) == NULL) {
4134 4142 mutex_exit(&pptr->port_mutex);
4135 4143 *rval = ENXIO;
4136 4144 break;
4137 4145 }
4138 4146 mutex_exit(&pptr->port_mutex);
4139 4147
4140 4148 mutex_enter(&plun->lun_tgt->tgt_mutex);
4141 4149 if (!(plun->lun_state & FCP_SCSI_LUN_TGT_INIT)) {
4142 4150 mutex_exit(&plun->lun_tgt->tgt_mutex);
4143 4151
4144 4152 *rval = ENXIO;
4145 4153 break;
4146 4154 }
4147 4155
4148 4156 if (plun->lun_sd == NULL) {
4149 4157 mutex_exit(&plun->lun_tgt->tgt_mutex);
4150 4158
4151 4159 *rval = ENXIO;
4152 4160 break;
4153 4161 }
4154 4162 mutex_exit(&plun->lun_tgt->tgt_mutex);
4155 4163
4156 4164 /*
4157 4165 * set up ap so that fcp_reset can figure out
4158 4166 * which target to reset
4159 4167 */
4160 4168 if (fcp_scsi_reset(&plun->lun_sd->sd_address,
4161 4169 RESET_TARGET) == FALSE) {
4162 4170 *rval = EIO;
4163 4171 }
4164 4172 break;
4165 4173 }
4166 4174
4167 4175 case DEVCTL_BUS_GETSTATE:
4168 4176 ASSERT(dcp != NULL);
4169 4177 ASSERT(pptr != NULL);
4170 4178 ASSERT(pptr->port_dip != NULL);
4171 4179 if (ndi_dc_return_bus_state(pptr->port_dip, dcp) !=
4172 4180 NDI_SUCCESS) {
4173 4181 *rval = EFAULT;
4174 4182 }
4175 4183 break;
4176 4184
4177 4185 case DEVCTL_BUS_QUIESCE:
4178 4186 case DEVCTL_BUS_UNQUIESCE:
4179 4187 *rval = ENOTSUP;
4180 4188 break;
4181 4189
4182 4190 case DEVCTL_BUS_RESET:
4183 4191 case DEVCTL_BUS_RESETALL:
4184 4192 ASSERT(pptr != NULL);
4185 4193 (void) fcp_linkreset(pptr, NULL, KM_SLEEP);
4186 4194 break;
4187 4195
4188 4196 default:
4189 4197 ASSERT(dcp != NULL);
4190 4198 *rval = ENOTTY;
4191 4199 break;
4192 4200 }
4193 4201
4194 4202 /* all done -- clean up and return */
4195 4203 out: if (devi_entered) {
4196 4204 if (is_mpxio) {
4197 4205 mdi_devi_exit(pptr->port_dip, circ);
4198 4206 } else {
4199 4207 ndi_devi_exit(pptr->port_dip, circ);
4200 4208 }
4201 4209 }
4202 4210
4203 4211 if (dcp != NULL) {
4204 4212 ndi_dc_freehdl(dcp);
4205 4213 }
4206 4214
4207 4215 return (retval);
4208 4216 }
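
For context, the DEVCTL_* commands handled by fcp_port_ioctl() normally originate in user space through the libdevice wrappers rather than hand-rolled ioctl() calls. The sketch below shows how a management tool might drive DEVCTL_DEVICE_OFFLINE for a LUN enumerated under an fp port, assuming the devctl(3DEVID) interfaces; the devfs path is made up for illustration.

#include <libdevice.h>	/* link with -ldevice */
#include <stdio.h>

int
main(void)
{
	/* Hypothetical path to a LUN enumerated under an fp port. */
	char *path =
	    "/devices/pci@0/fibre-channel@0/fp@0,0/ssd@w2100000c50a1b2c3,0";
	devctl_hdl_t hdl;

	if ((hdl = devctl_device_acquire(path, 0)) == NULL) {
		perror("devctl_device_acquire");
		return (1);
	}

	/* Arrives in fcp_port_ioctl() as DEVCTL_DEVICE_OFFLINE. */
	if (devctl_device_offline(hdl) != 0) {
		perror("devctl_device_offline");
		devctl_release(hdl);
		return (1);
	}

	devctl_release(hdl);
	return (0);
}

devctl_device_acquire() works through the devctl minor of the parent nexus and records the child's name@address, which is roughly what ndi_dc_getname()/ndi_dc_getaddr() retrieve in the handler above.
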
4209 4217
4210 4218
4211 4219 /*ARGSUSED*/
4212 4220 static int
4213 4221 fcp_els_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4214 4222 uint32_t claimed)
4215 4223 {
4216 4224 uchar_t r_ctl;
4217 4225 uchar_t ls_code;
4218 4226 struct fcp_port *pptr;
4219 4227
4220 4228 if ((pptr = fcp_get_port(port_handle)) == NULL || claimed) {
4221 4229 return (FC_UNCLAIMED);
4222 4230 }
4223 4231
4224 4232 mutex_enter(&pptr->port_mutex);
4225 4233 if (pptr->port_state & (FCP_STATE_DETACHING |
4226 4234 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4227 4235 mutex_exit(&pptr->port_mutex);
4228 4236 return (FC_UNCLAIMED);
4229 4237 }
4230 4238 mutex_exit(&pptr->port_mutex);
4231 4239
4232 4240 r_ctl = buf->ub_frame.r_ctl;
4233 4241
4234 4242 switch (r_ctl & R_CTL_ROUTING) {
4235 4243 case R_CTL_EXTENDED_SVC:
4236 4244 if (r_ctl == R_CTL_ELS_REQ) {
4237 4245 ls_code = buf->ub_buffer[0];
4238 4246
4239 4247 switch (ls_code) {
4240 4248 case LA_ELS_PRLI:
4241 4249 /*
4242 4250 * We really don't care if something fails.
4243 4251 * If the PRLI was not sent out, then the
4244 4252 * other end will time it out.
4245 4253 */
4246 4254 if (fcp_unsol_prli(pptr, buf) == FC_SUCCESS) {
4247 4255 return (FC_SUCCESS);
4248 4256 }
4249 4257 return (FC_UNCLAIMED);
4250 4258 /* NOTREACHED */
4251 4259
4252 4260 default:
4253 4261 break;
4254 4262 }
4255 4263 }
4256 4264 /* FALLTHROUGH */
4257 4265
4258 4266 default:
4259 4267 return (FC_UNCLAIMED);
4260 4268 }
4261 4269 }
4262 4270
4263 4271
4264 4272 /*ARGSUSED*/
4265 4273 static int
4266 4274 fcp_data_callback(opaque_t ulph, opaque_t port_handle, fc_unsol_buf_t *buf,
4267 4275 uint32_t claimed)
4268 4276 {
4269 4277 return (FC_UNCLAIMED);
4270 4278 }
4271 4279
4272 4280 /*
4273 4281 * Function: fcp_statec_callback
4274 4282 *
4275 4283 * Description: The purpose of this function is to handle a port state change.
4276 4284 * It is called from fp/fctl and, in a few instances, internally.
4277 4285 *
4278 4286 * Argument: ulph fp/fctl port handle
4279 4287 * port_handle fcp_port structure
4280 4288 * port_state Physical state of the port
4281 4289 * port_top Topology
4282 4290 * *devlist Pointer to the first entry of a table
4283 4291 * containing the remote ports that can be
4284 4292 * reached.
4285 4293 * dev_cnt Number of entries pointed by devlist.
4286 4294 * port_sid Port ID of the local port.
4287 4295 *
4288 4296 * Return Value: None
4289 4297 */
4290 4298 /*ARGSUSED*/
4291 4299 static void
4292 4300 fcp_statec_callback(opaque_t ulph, opaque_t port_handle,
4293 4301 uint32_t port_state, uint32_t port_top, fc_portmap_t *devlist,
4294 4302 uint32_t dev_cnt, uint32_t port_sid)
4295 4303 {
4296 4304 uint32_t link_count;
4297 4305 int map_len = 0;
4298 4306 struct fcp_port *pptr;
4299 4307 fcp_map_tag_t *map_tag = NULL;
4300 4308
4301 4309 if ((pptr = fcp_get_port(port_handle)) == NULL) {
4302 4310 fcp_log(CE_WARN, NULL, "!Invalid port handle in callback");
4303 4311 return; /* nothing to work with! */
4304 4312 }
4305 4313
4306 4314 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4307 4315 fcp_trace, FCP_BUF_LEVEL_2, 0,
4308 4316 "fcp_statec_callback: port state/dev_cnt/top ="
4309 4317 "%d/%d/%d", FC_PORT_STATE_MASK(port_state),
4310 4318 dev_cnt, port_top);
4311 4319
4312 4320 mutex_enter(&pptr->port_mutex);
4313 4321
4314 4322 /*
4315 4323 * If a thread is in detach, don't do anything.
4316 4324 */
4317 4325 if (pptr->port_state & (FCP_STATE_DETACHING |
4318 4326 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
4319 4327 mutex_exit(&pptr->port_mutex);
4320 4328 return;
4321 4329 }
4322 4330
4323 4331 /*
4324 4332 * First thing we do is set the FCP_STATE_IN_CB_DEVC flag so that if
4325 4333 * init_pkt is called, it knows whether or not the target's status
4326 4334 * (or pd) might be changing.
4327 4335 */
4328 4336
4329 4337 if (FC_PORT_STATE_MASK(port_state) == FC_STATE_DEVICE_CHANGE) {
4330 4338 pptr->port_state |= FCP_STATE_IN_CB_DEVC;
4331 4339 }
4332 4340
4333 4341 /*
4334 4342 * the transport doesn't allocate or probe unless being
4335 4343 * asked to by either the applications or ULPs
4336 4344 *
4337 4345 * in cases where the port is OFFLINE at the time of port
4338 4346 * attach callback and the link comes ONLINE later, for
4339 4347 * easier automatic node creation (i.e. without you having to
4340 4348 * go out and run the utility to perform LOGINs) the
4341 4349 * following conditional is helpful
4342 4350 */
4343 4351 pptr->port_phys_state = port_state;
4344 4352
4345 4353 if (dev_cnt) {
4346 4354 mutex_exit(&pptr->port_mutex);
4347 4355
4348 4356 map_len = sizeof (*map_tag) * dev_cnt;
4349 4357 map_tag = kmem_alloc(map_len, KM_NOSLEEP);
4350 4358 if (map_tag == NULL) {
4351 4359 fcp_log(CE_WARN, pptr->port_dip,
4352 4360 "!fcp%d: failed to allocate for map tags; "
4353 4361 " state change will not be processed",
4354 4362 pptr->port_instance);
4355 4363
4356 4364 mutex_enter(&pptr->port_mutex);
4357 4365 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4358 4366 mutex_exit(&pptr->port_mutex);
4359 4367
4360 4368 return;
4361 4369 }
4362 4370
4363 4371 mutex_enter(&pptr->port_mutex);
4364 4372 }
4365 4373
4366 4374 if (pptr->port_id != port_sid) {
4367 4375 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4368 4376 fcp_trace, FCP_BUF_LEVEL_3, 0,
4369 4377 "fcp: Port S_ID=0x%x => 0x%x", pptr->port_id,
4370 4378 port_sid);
4371 4379 /*
4372 4380 * The local port changed ID. It is the first time a port ID
4373 4381 * is assigned or something drastic happened. We might have
4374 4382 * been unplugged and replugged on another loop or fabric port
4375 4383 * or somebody grabbed the AL_PA we had or somebody rezoned
4376 4384 * the fabric we were plugged into.
4377 4385 */
4378 4386 pptr->port_id = port_sid;
4379 4387 }
4380 4388
4381 4389 switch (FC_PORT_STATE_MASK(port_state)) {
4382 4390 case FC_STATE_OFFLINE:
4383 4391 case FC_STATE_RESET_REQUESTED:
4384 4392 /*
4385 4393 * link has gone from online to offline -- just update the
4386 4394 * state of this port to BUSY and MARKed to go offline
4387 4395 */
4388 4396 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4389 4397 fcp_trace, FCP_BUF_LEVEL_3, 0,
4390 4398 "link went offline");
4391 4399 if ((pptr->port_state & FCP_STATE_OFFLINE) && dev_cnt) {
4392 4400 /*
4393 4401 * We were offline a while ago and this one
4394 4402 * seems to indicate that the loop has gone
4395 4403 * dead forever.
4396 4404 */
4397 4405 pptr->port_tmp_cnt += dev_cnt;
4398 4406 pptr->port_state &= ~FCP_STATE_OFFLINE;
4399 4407 pptr->port_state |= FCP_STATE_INIT;
4400 4408 link_count = pptr->port_link_cnt;
4401 4409 fcp_handle_devices(pptr, devlist, dev_cnt,
4402 4410 link_count, map_tag, FCP_CAUSE_LINK_DOWN);
4403 4411 } else {
4404 4412 pptr->port_link_cnt++;
4405 4413 ASSERT(!(pptr->port_state & FCP_STATE_SUSPENDED));
4406 4414 fcp_update_state(pptr, (FCP_LUN_BUSY |
4407 4415 FCP_LUN_MARK), FCP_CAUSE_LINK_DOWN);
4408 4416 if (pptr->port_mpxio) {
4409 4417 fcp_update_mpxio_path_verifybusy(pptr);
4410 4418 }
4411 4419 pptr->port_state |= FCP_STATE_OFFLINE;
4412 4420 pptr->port_state &=
4413 4421 ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
4414 4422 pptr->port_tmp_cnt = 0;
4415 4423 }
4416 4424 mutex_exit(&pptr->port_mutex);
4417 4425 break;
4418 4426
4419 4427 case FC_STATE_ONLINE:
4420 4428 case FC_STATE_LIP:
4421 4429 case FC_STATE_LIP_LBIT_SET:
4422 4430 /*
4423 4431 * link has gone from offline to online
4424 4432 */
4425 4433 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4426 4434 fcp_trace, FCP_BUF_LEVEL_3, 0,
4427 4435 "link went online");
4428 4436
4429 4437 pptr->port_link_cnt++;
4430 4438
4431 4439 while (pptr->port_ipkt_cnt) {
4432 4440 mutex_exit(&pptr->port_mutex);
4433 4441 delay(drv_usectohz(1000000));
4434 4442 mutex_enter(&pptr->port_mutex);
4435 4443 }
4436 4444
4437 4445 pptr->port_topology = port_top;
4438 4446
4439 4447 /*
4440 4448 * The state of the targets and luns accessible through this
4441 4449 * port is updated.
4442 4450 */
4443 4451 fcp_update_state(pptr, FCP_LUN_BUSY | FCP_LUN_MARK,
4444 4452 FCP_CAUSE_LINK_CHANGE);
4445 4453
4446 4454 pptr->port_state &= ~(FCP_STATE_INIT | FCP_STATE_OFFLINE);
4447 4455 pptr->port_state |= FCP_STATE_ONLINING;
4448 4456 pptr->port_tmp_cnt = dev_cnt;
4449 4457 link_count = pptr->port_link_cnt;
4450 4458
4451 4459 pptr->port_deadline = fcp_watchdog_time +
4452 4460 FCP_ICMD_DEADLINE;
4453 4461
4454 4462 if (!dev_cnt) {
4455 4463 /*
4456 4464 * We go directly to the online state if no remote
4457 4465 * ports were discovered.
4458 4466 */
4459 4467 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4460 4468 fcp_trace, FCP_BUF_LEVEL_3, 0,
4461 4469 "No remote ports discovered");
4462 4470
4463 4471 pptr->port_state &= ~FCP_STATE_ONLINING;
4464 4472 pptr->port_state |= FCP_STATE_ONLINE;
4465 4473 }
4466 4474
4467 4475 switch (port_top) {
4468 4476 case FC_TOP_FABRIC:
4469 4477 case FC_TOP_PUBLIC_LOOP:
4470 4478 case FC_TOP_PRIVATE_LOOP:
4471 4479 case FC_TOP_PT_PT:
4472 4480
4473 4481 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4474 4482 fcp_retry_ns_registry(pptr, port_sid);
4475 4483 }
4476 4484
4477 4485 fcp_handle_devices(pptr, devlist, dev_cnt, link_count,
4478 4486 map_tag, FCP_CAUSE_LINK_CHANGE);
4479 4487 break;
4480 4488
4481 4489 default:
4482 4490 /*
4483 4491 * We got here because we were provided with an unknown
4484 4492 * topology.
4485 4493 */
4486 4494 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4487 4495 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
4488 4496 }
4489 4497
4490 4498 pptr->port_tmp_cnt -= dev_cnt;
4491 4499 fcp_log(CE_WARN, pptr->port_dip,
4492 4500 "!unknown/unsupported topology (0x%x)", port_top);
4493 4501 break;
4494 4502 }
4495 4503 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4496 4504 fcp_trace, FCP_BUF_LEVEL_3, 0,
4497 4505 "Notify ssd of the reset to reinstate the reservations");
4498 4506
4499 4507 scsi_hba_reset_notify_callback(&pptr->port_mutex,
4500 4508 &pptr->port_reset_notify_listf);
4501 4509
4502 4510 mutex_exit(&pptr->port_mutex);
4503 4511
4504 4512 break;
4505 4513
4506 4514 case FC_STATE_RESET:
4507 4515 ASSERT(pptr->port_state & FCP_STATE_OFFLINE);
4508 4516 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4509 4517 fcp_trace, FCP_BUF_LEVEL_3, 0,
4510 4518 "RESET state, waiting for Offline/Online state_cb");
4511 4519 mutex_exit(&pptr->port_mutex);
4512 4520 break;
4513 4521
4514 4522 case FC_STATE_DEVICE_CHANGE:
4515 4523 /*
4516 4524 * We come here when an application has requested
4517 4525 * Dynamic node creation/deletion in Fabric connectivity.
4518 4526 */
4519 4527 if (pptr->port_state & (FCP_STATE_OFFLINE |
4520 4528 FCP_STATE_INIT)) {
4521 4529 /*
4522 4530 * This case can happen when the FCTL is in the
4523 4531 			 * process of giving us an online event and the host on
4524 4532 * the other side issues a PLOGI/PLOGO. Ideally
4525 4533 * the state changes should be serialized unless
4526 4534 * they are opposite (online-offline).
4527 4535 * The transport will give us a final state change
4528 4536 * so we can ignore this for the time being.
4529 4537 */
4530 4538 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4531 4539 mutex_exit(&pptr->port_mutex);
4532 4540 break;
4533 4541 }
4534 4542
4535 4543 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4536 4544 fcp_retry_ns_registry(pptr, port_sid);
4537 4545 }
4538 4546
4539 4547 /*
4540 4548 * Extend the deadline under steady state conditions
4541 4549 * to provide more time for the device-change-commands
4542 4550 */
4543 4551 if (!pptr->port_ipkt_cnt) {
4544 4552 pptr->port_deadline = fcp_watchdog_time +
4545 4553 FCP_ICMD_DEADLINE;
4546 4554 }
4547 4555
4548 4556 /*
4549 4557 * There is another race condition here, where if we were
4550 4558 		 * in ONLINING state and a device in the map logs out,
4551 4559 * fp will give another state change as DEVICE_CHANGE
4552 4560 * and OLD. This will result in that target being offlined.
4553 4561 * The pd_handle is freed. If from the first statec callback
4554 4562 * we were going to fire a PLOGI/PRLI, the system will
4555 4563 * panic in fc_ulp_transport with invalid pd_handle.
4556 4564 * The fix is to check for the link_cnt before issuing
4557 4565 * any command down.
4558 4566 */
4559 4567 fcp_update_targets(pptr, devlist, dev_cnt,
4560 4568 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_TGT_CHANGE);
4561 4569
4562 4570 link_count = pptr->port_link_cnt;
4563 4571
4564 4572 fcp_handle_devices(pptr, devlist, dev_cnt,
4565 4573 link_count, map_tag, FCP_CAUSE_TGT_CHANGE);
4566 4574
4567 4575 pptr->port_state &= ~FCP_STATE_IN_CB_DEVC;
4568 4576
4569 4577 mutex_exit(&pptr->port_mutex);
4570 4578 break;
4571 4579
4572 4580 case FC_STATE_TARGET_PORT_RESET:
4573 4581 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
4574 4582 fcp_retry_ns_registry(pptr, port_sid);
4575 4583 }
4576 4584
4577 4585 /* Do nothing else */
4578 4586 mutex_exit(&pptr->port_mutex);
4579 4587 break;
4580 4588
4581 4589 default:
4582 4590 fcp_log(CE_WARN, pptr->port_dip,
4583 4591 "!Invalid state change=0x%x", port_state);
4584 4592 mutex_exit(&pptr->port_mutex);
4585 4593 break;
4586 4594 }
4587 4595
4588 4596 if (map_tag) {
4589 4597 kmem_free(map_tag, map_len);
4590 4598 }
4591 4599 }
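
A recurring guard in the callback above (and in fcp_handle_devices() below) is the generation-counter check: port_link_cnt and tgt_change_cnt are snapshotted while a lock is held, and the snapshot is compared again after the lock has been dropped and retaken, so that work belonging to a superseded link or target event is quietly abandoned. A small user-space sketch of that guard, with hypothetical names rather than the driver's own types:

#include <pthread.h>
#include <stdio.h>

struct port {
	pthread_mutex_t	lock;
	int		link_cnt;	/* bumped on every link state change */
};

/*
 * Do some work for the current link generation; give up quietly if the
 * link changed while the lock was dropped.
 */
static void
handle_change(struct port *p)
{
	int snapshot;

	(void) pthread_mutex_lock(&p->lock);
	snapshot = p->link_cnt;		/* capture the generation */
	(void) pthread_mutex_unlock(&p->lock);

	/* ... long-running discovery work without the lock ... */

	(void) pthread_mutex_lock(&p->lock);
	if (p->link_cnt != snapshot) {
		/* A newer link event superseded this work; drop it. */
		(void) pthread_mutex_unlock(&p->lock);
		return;
	}
	/* Still current: commit the results here. */
	(void) pthread_mutex_unlock(&p->lock);
}

int
main(void)
{
	struct port p = { PTHREAD_MUTEX_INITIALIZER, 0 };

	handle_change(&p);
	(void) printf("done\n");
	return (0);
}
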
4592 4600
4593 4601 /*
4594 4602 * Function: fcp_handle_devices
4595 4603 *
4596 4604 * Description: This function updates the devices currently known by
4597 4605 * walking the list provided by the caller. The list passed
4598 4606 * by the caller is supposed to be the list of reachable
4599 4607 * devices.
4600 4608 *
4601 4609 * Argument: *pptr Fcp port structure.
4602 4610 * *devlist Pointer to the first entry of a table
4603 4611 * containing the remote ports that can be
4604 4612 * reached.
4605 4613 * dev_cnt Number of entries pointed by devlist.
4606 4614 * link_cnt Link state count.
4607 4615 * *map_tag Array of fcp_map_tag_t structures.
4608 4616 * cause What caused this function to be called.
4609 4617 *
4610 4618 * Return Value: None
4611 4619 *
4612 4620 * Notes: The pptr->port_mutex must be held.
4613 4621 */
4614 4622 static void
4615 4623 fcp_handle_devices(struct fcp_port *pptr, fc_portmap_t devlist[],
4616 4624 uint32_t dev_cnt, int link_cnt, fcp_map_tag_t *map_tag, int cause)
4617 4625 {
4618 4626 int i;
4619 4627 int check_finish_init = 0;
4620 4628 fc_portmap_t *map_entry;
4621 4629 struct fcp_tgt *ptgt = NULL;
4622 4630
4623 4631 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4624 4632 fcp_trace, FCP_BUF_LEVEL_3, 0,
4625 4633 "fcp_handle_devices: called for %d dev(s)", dev_cnt);
4626 4634
4627 4635 if (dev_cnt) {
4628 4636 ASSERT(map_tag != NULL);
4629 4637 }
4630 4638
4631 4639 /*
4632 4640 * The following code goes through the list of remote ports that are
4633 4641 * accessible through this (pptr) local port (The list walked is the
4634 4642 * one provided by the caller which is the list of the remote ports
4635 4643 * currently reachable). It checks if any of them was already
4636 4644 * known by looking for the corresponding target structure based on
4637 4645 * the world wide name. If a target is part of the list it is tagged
4638 4646 * (ptgt->tgt_aux_state = FCP_TGT_TAGGED).
4639 4647 *
4640 4648 * Old comment
4641 4649 * -----------
4642 4650 * Before we drop port mutex; we MUST get the tags updated; This
4643 4651 * two step process is somewhat slow, but more reliable.
4644 4652 */
4645 4653 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4646 4654 map_entry = &(devlist[i]);
4647 4655
4648 4656 /*
4649 4657 * get ptr to this map entry in our port's
4650 4658 * list (if any)
4651 4659 */
4652 4660 ptgt = fcp_lookup_target(pptr,
4653 4661 (uchar_t *)&(map_entry->map_pwwn));
4654 4662
4655 4663 if (ptgt) {
4656 4664 map_tag[i] = ptgt->tgt_change_cnt;
4657 4665 if (cause == FCP_CAUSE_LINK_CHANGE) {
4658 4666 ptgt->tgt_aux_state = FCP_TGT_TAGGED;
4659 4667 }
4660 4668 }
4661 4669 }
4662 4670
4663 4671 /*
4664 4672 * At this point we know which devices of the new list were already
4665 4673 * known (The field tgt_aux_state of the target structure has been
4666 4674 * set to FCP_TGT_TAGGED).
4667 4675 *
4668 4676 * The following code goes through the list of targets currently known
4669 4677 * by the local port (the list is actually a hashing table). If a
4670 4678 * target is found and is not tagged, it means the target cannot
4671 4679 * be reached anymore through the local port (pptr). It is offlined.
4672 4680 * The offlining only occurs if the cause is FCP_CAUSE_LINK_CHANGE.
4673 4681 */
4674 4682 for (i = 0; i < FCP_NUM_HASH; i++) {
4675 4683 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
4676 4684 ptgt = ptgt->tgt_next) {
4677 4685 mutex_enter(&ptgt->tgt_mutex);
4678 4686 if ((ptgt->tgt_aux_state != FCP_TGT_TAGGED) &&
4679 4687 (cause == FCP_CAUSE_LINK_CHANGE) &&
4680 4688 !(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4681 4689 fcp_offline_target_now(pptr, ptgt,
4682 4690 link_cnt, ptgt->tgt_change_cnt, 0);
4683 4691 }
4684 4692 mutex_exit(&ptgt->tgt_mutex);
4685 4693 }
4686 4694 }
4687 4695
4688 4696 /*
4689 4697 * At this point, the devices that were known but cannot be reached
4690 4698 * anymore, have most likely been offlined.
4691 4699 *
4692 4700 * The following section of code seems to go through the list of
4693 4701 * remote ports that can now be reached. For every single one it
4694 4702 * checks if it is already known or if it is a new port.
4695 4703 */
4696 4704 for (i = 0; (i < dev_cnt) && (pptr->port_link_cnt == link_cnt); i++) {
4697 4705
4698 4706 if (check_finish_init) {
4699 4707 ASSERT(i > 0);
4700 4708 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4701 4709 map_tag[i - 1], cause);
4702 4710 check_finish_init = 0;
4703 4711 }
4704 4712
4705 4713 /* get a pointer to this map entry */
4706 4714 map_entry = &(devlist[i]);
4707 4715
4708 4716 /*
4709 4717 * Check for the duplicate map entry flag. If we have marked
4710 4718 * this entry as a duplicate we skip it since the correct
4711 4719 * (perhaps even same) state change will be encountered
4712 4720 * later in the list.
4713 4721 */
4714 4722 if (map_entry->map_flags & PORT_DEVICE_DUPLICATE_MAP_ENTRY) {
4715 4723 continue;
4716 4724 }
4717 4725
4718 4726 /* get ptr to this map entry in our port's list (if any) */
4719 4727 ptgt = fcp_lookup_target(pptr,
4720 4728 (uchar_t *)&(map_entry->map_pwwn));
4721 4729
4722 4730 if (ptgt) {
4723 4731 /*
4724 4732 * This device was already known. The field
4725 4733 * tgt_aux_state is reset (was probably set to
4726 4734 * FCP_TGT_TAGGED previously in this routine).
4727 4735 */
4728 4736 ptgt->tgt_aux_state = 0;
4729 4737 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4730 4738 fcp_trace, FCP_BUF_LEVEL_3, 0,
4731 4739 "handle_devices: map did/state/type/flags = "
4732 4740 "0x%x/0x%x/0x%x/0x%x, tgt_d_id=0x%x, "
4733 4741 "tgt_state=%d",
4734 4742 map_entry->map_did.port_id, map_entry->map_state,
4735 4743 map_entry->map_type, map_entry->map_flags,
4736 4744 ptgt->tgt_d_id, ptgt->tgt_state);
4737 4745 }
4738 4746
4739 4747 if (map_entry->map_type == PORT_DEVICE_OLD ||
4740 4748 map_entry->map_type == PORT_DEVICE_NEW ||
4741 4749 map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED ||
4742 4750 map_entry->map_type == PORT_DEVICE_CHANGED) {
4743 4751 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4744 4752 fcp_trace, FCP_BUF_LEVEL_2, 0,
4745 4753 "map_type=%x, did = %x",
4746 4754 map_entry->map_type,
4747 4755 map_entry->map_did.port_id);
4748 4756 }
4749 4757
4750 4758 switch (map_entry->map_type) {
4751 4759 case PORT_DEVICE_NOCHANGE:
4752 4760 case PORT_DEVICE_USER_CREATE:
4753 4761 case PORT_DEVICE_USER_LOGIN:
4754 4762 case PORT_DEVICE_NEW:
4755 4763 case PORT_DEVICE_REPORTLUN_CHANGED:
4756 4764 FCP_TGT_TRACE(ptgt, map_tag[i], FCP_TGT_TRACE_1);
4757 4765
4758 4766 if (fcp_handle_mapflags(pptr, ptgt, map_entry,
4759 4767 link_cnt, (ptgt) ? map_tag[i] : 0,
4760 4768 cause) == TRUE) {
4761 4769
4762 4770 FCP_TGT_TRACE(ptgt, map_tag[i],
4763 4771 FCP_TGT_TRACE_2);
4764 4772 check_finish_init++;
4765 4773 }
4766 4774 break;
4767 4775
4768 4776 case PORT_DEVICE_OLD:
4769 4777 if (ptgt != NULL) {
4770 4778 FCP_TGT_TRACE(ptgt, map_tag[i],
4771 4779 FCP_TGT_TRACE_3);
4772 4780
4773 4781 mutex_enter(&ptgt->tgt_mutex);
4774 4782 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4775 4783 /*
4776 4784 * Must do an in-line wait for I/Os
4777 4785 * to get drained
4778 4786 */
4779 4787 mutex_exit(&ptgt->tgt_mutex);
4780 4788 mutex_exit(&pptr->port_mutex);
4781 4789
4782 4790 mutex_enter(&ptgt->tgt_mutex);
4783 4791 while (ptgt->tgt_ipkt_cnt ||
4784 4792 fcp_outstanding_lun_cmds(ptgt)
4785 4793 == FC_SUCCESS) {
4786 4794 mutex_exit(&ptgt->tgt_mutex);
4787 4795 delay(drv_usectohz(1000000));
4788 4796 mutex_enter(&ptgt->tgt_mutex);
4789 4797 }
4790 4798 mutex_exit(&ptgt->tgt_mutex);
4791 4799
4792 4800 mutex_enter(&pptr->port_mutex);
4793 4801 mutex_enter(&ptgt->tgt_mutex);
4794 4802
4795 4803 (void) fcp_offline_target(pptr, ptgt,
4796 4804 link_cnt, map_tag[i], 0, 0);
4797 4805 }
4798 4806 mutex_exit(&ptgt->tgt_mutex);
4799 4807 }
4800 4808 check_finish_init++;
4801 4809 break;
4802 4810
4803 4811 case PORT_DEVICE_USER_DELETE:
4804 4812 case PORT_DEVICE_USER_LOGOUT:
4805 4813 if (ptgt != NULL) {
4806 4814 FCP_TGT_TRACE(ptgt, map_tag[i],
4807 4815 FCP_TGT_TRACE_4);
4808 4816
4809 4817 mutex_enter(&ptgt->tgt_mutex);
4810 4818 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
4811 4819 (void) fcp_offline_target(pptr, ptgt,
4812 4820 link_cnt, map_tag[i], 1, 0);
4813 4821 }
4814 4822 mutex_exit(&ptgt->tgt_mutex);
4815 4823 }
4816 4824 check_finish_init++;
4817 4825 break;
4818 4826
4819 4827 case PORT_DEVICE_CHANGED:
4820 4828 if (ptgt != NULL) {
4821 4829 FCP_TGT_TRACE(ptgt, map_tag[i],
4822 4830 FCP_TGT_TRACE_5);
4823 4831
4824 4832 if (fcp_device_changed(pptr, ptgt,
4825 4833 map_entry, link_cnt, map_tag[i],
4826 4834 cause) == TRUE) {
4827 4835 check_finish_init++;
4828 4836 }
4829 4837 } else {
4830 4838 if (fcp_handle_mapflags(pptr, ptgt,
4831 4839 map_entry, link_cnt, 0, cause) == TRUE) {
4832 4840 check_finish_init++;
4833 4841 }
4834 4842 }
4835 4843 break;
4836 4844
4837 4845 default:
4838 4846 fcp_log(CE_WARN, pptr->port_dip,
4839 4847 "!Invalid map_type=0x%x", map_entry->map_type);
4840 4848 check_finish_init++;
4841 4849 break;
4842 4850 }
4843 4851 }
4844 4852
4845 4853 if (check_finish_init && pptr->port_link_cnt == link_cnt) {
4846 4854 ASSERT(i > 0);
4847 4855 (void) fcp_call_finish_init_held(pptr, ptgt, link_cnt,
4848 4856 map_tag[i-1], cause);
4849 4857 } else if (dev_cnt == 0 && pptr->port_link_cnt == link_cnt) {
4850 4858 fcp_offline_all(pptr, link_cnt, cause);
4851 4859 }
4852 4860 }
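
As the comments inside fcp_handle_devices() explain, the routine is essentially a mark-and-sweep pass: targets present in the new port map are tagged, known targets left untagged are offlined (for FCP_CAUSE_LINK_CHANGE), and each map entry is then processed according to its map_type. A minimal, self-contained sketch of the tag/sweep idea over a plain array follows; the struct and names are hypothetical, not the driver's.

#include <stdio.h>
#include <string.h>

#define	NTGT	4

struct tgt {
	char	wwn[8];
	int	tagged;
	int	offline;
};

/* Known targets (stand-in for the port's target hash table). */
static struct tgt known[NTGT] = {
	{ "tgt-A", 0, 0 }, { "tgt-B", 0, 0 },
	{ "tgt-C", 0, 0 }, { "tgt-D", 0, 0 }
};

int
main(void)
{
	/* New map: only A and C are still reachable. */
	const char *map[] = { "tgt-A", "tgt-C" };
	int nmap = (int)(sizeof (map) / sizeof (map[0]));
	int i, j;

	/* Pass 1: tag every known target that appears in the new map. */
	for (i = 0; i < nmap; i++)
		for (j = 0; j < NTGT; j++)
			if (strcmp(map[i], known[j].wwn) == 0)
				known[j].tagged = 1;

	/* Pass 2: sweep; anything known but untagged is offlined. */
	for (j = 0; j < NTGT; j++)
		if (!known[j].tagged)
			known[j].offline = 1;

	for (j = 0; j < NTGT; j++)
		(void) printf("%s: %s\n", known[j].wwn,
		    known[j].offline ? "offlined" : "still reachable");
	return (0);
}
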
4853 4861
4854 4862 static int
4855 4863 fcp_handle_reportlun_changed(struct fcp_tgt *ptgt, int cause)
4856 4864 {
4857 4865 struct fcp_lun *plun;
4858 4866 struct fcp_port *pptr;
4859 4867 int rscn_count;
4860 4868 int lun0_newalloc;
4861 4869 int ret = TRUE;
4862 4870
4863 4871 ASSERT(ptgt);
4864 4872 pptr = ptgt->tgt_port;
4865 4873 lun0_newalloc = 0;
4866 4874 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
4867 4875 /*
4868 4876 * no LUN struct for LUN 0 yet exists,
4869 4877 * so create one
4870 4878 */
4871 4879 plun = fcp_alloc_lun(ptgt);
4872 4880 if (plun == NULL) {
4873 4881 fcp_log(CE_WARN, pptr->port_dip,
4874 4882 "!Failed to allocate lun 0 for"
4875 4883 " D_ID=%x", ptgt->tgt_d_id);
4876 4884 return (ret);
4877 4885 }
4878 4886 lun0_newalloc = 1;
4879 4887 }
4880 4888
4881 4889 mutex_enter(&ptgt->tgt_mutex);
4882 4890 /*
4883 4891 * consider lun 0 as device not connected if it is
4884 4892 * offlined or newly allocated
4885 4893 */
4886 4894 if ((plun->lun_state & FCP_LUN_OFFLINE) || lun0_newalloc) {
4887 4895 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
4888 4896 }
4889 4897 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
4890 4898 plun->lun_state &= ~FCP_LUN_OFFLINE;
4891 4899 ptgt->tgt_lun_cnt = 1;
4892 4900 ptgt->tgt_report_lun_cnt = 0;
4893 4901 mutex_exit(&ptgt->tgt_mutex);
4894 4902
4895 4903 rscn_count = fc_ulp_get_rscn_count(pptr->port_fp_handle);
4896 4904 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
4897 4905 sizeof (struct fcp_reportlun_resp), pptr->port_link_cnt,
4898 4906 ptgt->tgt_change_cnt, cause, rscn_count) != DDI_SUCCESS) {
4899 4907 FCP_TRACE(fcp_logq, pptr->port_instbuf,
4900 4908 fcp_trace, FCP_BUF_LEVEL_3, 0, "!Failed to send REPORTLUN "
4901 4909 "to D_ID=%x", ptgt->tgt_d_id);
4902 4910 } else {
4903 4911 ret = FALSE;
4904 4912 }
4905 4913
4906 4914 return (ret);
4907 4915 }
4908 4916
4909 4917 /*
4910 4918 * Function: fcp_handle_mapflags
4911 4919 *
4912 4920 * Description: This function creates a target structure if the ptgt passed
4913 4921 * is NULL. It also kicks off the PLOGI if we are not logged
4914 4922 * into the target yet or the PRLI if we are logged into the
4915 4923 * target already. The rest of the treatment is done in the
4916 4924 * callbacks of the PLOGI or PRLI.
4917 4925 *
4918 4926 * Argument: *pptr FCP Port structure.
4919 4927 * *ptgt Target structure.
4920 4928 * *map_entry Array of fc_portmap_t structures.
4921 4929 * link_cnt Link state count.
4922 4930 * tgt_cnt Target state count.
4923 4931 * cause What caused this function to be called.
4924 4932 *
4925 4933 * Return Value: TRUE Failed
4926 4934 * FALSE Succeeded
4927 4935 *
4928 4936 * Notes: pptr->port_mutex must be owned.
4929 4937 */
4930 4938 static int
4931 4939 fcp_handle_mapflags(struct fcp_port *pptr, struct fcp_tgt *ptgt,
4932 4940 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
4933 4941 {
4934 4942 int lcount;
4935 4943 int tcount;
4936 4944 int ret = TRUE;
4937 4945 int alloc;
4938 4946 struct fcp_ipkt *icmd;
4939 4947 struct fcp_lun *pseq_lun = NULL;
4940 4948 uchar_t opcode;
4941 4949 int valid_ptgt_was_passed = FALSE;
4942 4950
4943 4951 ASSERT(mutex_owned(&pptr->port_mutex));
4944 4952
4945 4953 /*
4946 4954 * This case is possible where the FCTL has come up and done discovery
4947 4955 * before FCP was loaded and attached. FCTL would have discovered the
4948 4956 * devices and later the ULP came online. In this case ULP's would get
4949 4957 * PORT_DEVICE_NOCHANGE but target would be NULL.
4950 4958 */
4951 4959 if (ptgt == NULL) {
4952 4960 /* don't already have a target */
4953 4961 mutex_exit(&pptr->port_mutex);
4954 4962 ptgt = fcp_alloc_tgt(pptr, map_entry, link_cnt);
4955 4963 mutex_enter(&pptr->port_mutex);
4956 4964
4957 4965 if (ptgt == NULL) {
4958 4966 fcp_log(CE_WARN, pptr->port_dip,
4959 4967 "!FC target allocation failed");
4960 4968 return (ret);
4961 4969 }
4962 4970 mutex_enter(&ptgt->tgt_mutex);
4963 4971 ptgt->tgt_statec_cause = cause;
4964 4972 ptgt->tgt_tmp_cnt = 1;
4965 4973 mutex_exit(&ptgt->tgt_mutex);
4966 4974 } else {
4967 4975 valid_ptgt_was_passed = TRUE;
4968 4976 }
4969 4977
4970 4978 /*
4971 4979 * Copy in the target parameters
4972 4980 */
4973 4981 mutex_enter(&ptgt->tgt_mutex);
4974 4982 ptgt->tgt_d_id = map_entry->map_did.port_id;
4975 4983 ptgt->tgt_hard_addr = map_entry->map_hard_addr.hard_addr;
4976 4984 ptgt->tgt_pd_handle = map_entry->map_pd;
4977 4985 ptgt->tgt_fca_dev = NULL;
4978 4986
4979 4987 /* Copy port and node WWNs */
4980 4988 bcopy(&map_entry->map_nwwn, &ptgt->tgt_node_wwn.raw_wwn[0],
4981 4989 FC_WWN_SIZE);
4982 4990 bcopy(&map_entry->map_pwwn, &ptgt->tgt_port_wwn.raw_wwn[0],
4983 4991 FC_WWN_SIZE);
4984 4992
4985 4993 if (!(map_entry->map_flags & PORT_DEVICE_NO_SKIP_DEVICE_DISCOVERY) &&
4986 4994 (map_entry->map_type == PORT_DEVICE_NOCHANGE) &&
4987 4995 (map_entry->map_state == PORT_DEVICE_LOGGED_IN) &&
4988 4996 valid_ptgt_was_passed) {
4989 4997 /*
4990 4998 * determine if there are any tape LUNs on this target
4991 4999 */
4992 5000 for (pseq_lun = ptgt->tgt_lun;
4993 5001 pseq_lun != NULL;
4994 5002 pseq_lun = pseq_lun->lun_next) {
4995 5003 if ((pseq_lun->lun_type == DTYPE_SEQUENTIAL) &&
4996 5004 !(pseq_lun->lun_state & FCP_LUN_OFFLINE)) {
4997 5005 fcp_update_tgt_state(ptgt, FCP_RESET,
4998 5006 FCP_LUN_MARK);
4999 5007 mutex_exit(&ptgt->tgt_mutex);
5000 5008 return (ret);
5001 5009 }
5002 5010 }
5003 5011 }
5004 5012
5005 5013 /*
5006 5014 	 * If a REPORT_LUN_CHANGED unit attention (UA) was received,
5007 5015 	 * send out REPORT LUN promptly and skip the PLOGI/PRLI process
5008 5016 */
5009 5017 if (map_entry->map_type == PORT_DEVICE_REPORTLUN_CHANGED) {
5010 5018 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE | FCP_TGT_MARK);
5011 5019 mutex_exit(&ptgt->tgt_mutex);
5012 5020 mutex_exit(&pptr->port_mutex);
5013 5021
5014 5022 ret = fcp_handle_reportlun_changed(ptgt, cause);
5015 5023
5016 5024 mutex_enter(&pptr->port_mutex);
5017 5025 return (ret);
5018 5026 }
5019 5027
5020 5028 /*
5021 5029 * If ptgt was NULL when this function was entered, then tgt_node_state
5022 5030 	 * was never explicitly initialized; it was simply zeroed out, which
5023 5031 	 * corresponds to FCP_TGT_NODE_NONE.
5024 5032 */
5025 5033 switch (ptgt->tgt_node_state) {
5026 5034 case FCP_TGT_NODE_NONE:
5027 5035 case FCP_TGT_NODE_ON_DEMAND:
5028 5036 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5029 5037 !fcp_enable_auto_configuration &&
5030 5038 map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5031 5039 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5032 5040 } else if (FC_TOP_EXTERNAL(pptr->port_topology) &&
5033 5041 fcp_enable_auto_configuration &&
5034 5042 (ptgt->tgt_manual_config_only == 1) &&
5035 5043 map_entry->map_type != PORT_DEVICE_USER_CREATE) {
5036 5044 /*
5037 5045 * If auto configuration is set and
5038 5046 * the tgt_manual_config_only flag is set then
5039 5047 * we only want the user to be able to change
5040 5048 * the state through create_on_demand.
5041 5049 */
5042 5050 ptgt->tgt_node_state = FCP_TGT_NODE_ON_DEMAND;
5043 5051 } else {
5044 5052 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5045 5053 }
5046 5054 break;
5047 5055
5048 5056 case FCP_TGT_NODE_PRESENT:
5049 5057 break;
5050 5058 }
5051 5059 /*
5052 5060 * If we are booting from a fabric device, make sure we
5053 5061 * mark the node state appropriately for this target to be
5054 5062 * enumerated
5055 5063 */
5056 5064 if (FC_TOP_EXTERNAL(pptr->port_topology) && pptr->port_boot_wwn[0]) {
5057 5065 if (bcmp((caddr_t)pptr->port_boot_wwn,
5058 5066 (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
5059 5067 sizeof (ptgt->tgt_port_wwn)) == 0) {
5060 5068 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
5061 5069 }
5062 5070 }
5063 5071 mutex_exit(&ptgt->tgt_mutex);
5064 5072
5065 5073 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5066 5074 fcp_trace, FCP_BUF_LEVEL_3, 0,
5067 5075 "map_pd=%p, map_type=%x, did = %x, ulp_rscn_count=0x%x",
5068 5076 map_entry->map_pd, map_entry->map_type, map_entry->map_did.port_id,
5069 5077 map_entry->map_rscn_info.ulp_rscn_count);
5070 5078
5071 5079 mutex_enter(&ptgt->tgt_mutex);
5072 5080
5073 5081 /*
5074 5082 * Reset target OFFLINE state and mark the target BUSY
5075 5083 */
5076 5084 ptgt->tgt_state &= ~FCP_TGT_OFFLINE;
5077 5085 ptgt->tgt_state |= (FCP_TGT_BUSY | FCP_TGT_MARK);
5078 5086
5079 5087 tcount = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
5080 5088 lcount = link_cnt;
5081 5089
5082 5090 mutex_exit(&ptgt->tgt_mutex);
5083 5091 mutex_exit(&pptr->port_mutex);
5084 5092
5085 5093 /*
5086 5094 * if we are already logged in, then we do a PRLI, else
5087 5095 * we do a PLOGI first (to get logged in)
5088 5096 *
5089 5097 * We will not check if we are the PLOGI initiator
5090 5098 */
5091 5099 opcode = (map_entry->map_state == PORT_DEVICE_LOGGED_IN &&
5092 5100 map_entry->map_pd != NULL) ? LA_ELS_PRLI : LA_ELS_PLOGI;
5093 5101
5094 5102 alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
5095 5103
5096 5104 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5097 5105 pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
5098 5106 cause, map_entry->map_rscn_info.ulp_rscn_count);
5099 5107
5100 5108 if (icmd == NULL) {
5101 5109 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_29);
5102 5110 /*
5103 5111 		 * We exited port_mutex before calling fcp_icmd_alloc, so we
5104 5112 		 * need to make sure we reacquire it before returning.
5105 5113 */
5106 5114 mutex_enter(&pptr->port_mutex);
5107 5115 return (FALSE);
5108 5116 }
5109 5117
5110 5118 	/* TRUE is only returned when the target is intentionally skipped */
5111 5119 ret = FALSE;
5112 5120 /* discover info about this target */
5113 5121 if ((fcp_send_els(pptr, ptgt, icmd, opcode,
5114 5122 lcount, tcount, cause)) == DDI_SUCCESS) {
5115 5123 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_9);
5116 5124 } else {
5117 5125 fcp_icmd_free(pptr, icmd);
5118 5126 ret = TRUE;
5119 5127 }
5120 5128 mutex_enter(&pptr->port_mutex);
5121 5129
5122 5130 return (ret);
5123 5131 }
5124 5132
5125 5133 /*
5126 5134 * Function: fcp_send_els
5127 5135 *
5128 5136 * Description: Sends an ELS to the target specified by the caller. Supports
5129 5137 * PLOGI and PRLI.
5130 5138 *
5131 5139 * Argument: *pptr Fcp port.
5132 5140 * *ptgt Target to send the ELS to.
5133 5141 * *icmd Internal packet
5134 5142 * opcode ELS opcode
5135 5143 * lcount Link state change counter
5136 5144 * tcount Target state change counter
5137 5145 * cause What caused the call
5138 5146 *
5139 5147 * Return Value: DDI_SUCCESS
5140 5148 * Others
5141 5149 */
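/*
 * Illustrative sketch (not part of this change): issuing a PRLI through
 * fcp_send_els() with icmd == NULL, so the routine allocates its own
 * internal packet; pptr/ptgt/lcount/tcount/cause are assumed to be set up
 * as in fcp_handle_mapflags() above.
 *
 *	if (fcp_send_els(pptr, ptgt, NULL, LA_ELS_PRLI,
 *	    lcount, tcount, cause) != DDI_SUCCESS) {
 *		the ELS was not handed to the transport; the internally
 *		allocated packet has already been freed by fcp_send_els()
 *	}
 */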
5142 5150 static int
5143 5151 fcp_send_els(struct fcp_port *pptr, struct fcp_tgt *ptgt,
5144 5152 struct fcp_ipkt *icmd, uchar_t opcode, int lcount, int tcount, int cause)
5145 5153 {
5146 5154 fc_packet_t *fpkt;
5147 5155 fc_frame_hdr_t *hp;
5148 5156 int internal = 0;
5149 5157 int alloc;
5150 5158 int cmd_len;
5151 5159 int resp_len;
5152 5160 int res = DDI_FAILURE; /* default result */
5153 5161 int rval = DDI_FAILURE;
5154 5162
5155 5163 ASSERT(opcode == LA_ELS_PLOGI || opcode == LA_ELS_PRLI);
5156 5164 ASSERT(ptgt->tgt_port == pptr);
5157 5165
5158 5166 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5159 5167 fcp_trace, FCP_BUF_LEVEL_5, 0,
5160 5168 "fcp_send_els: d_id=0x%x ELS 0x%x (%s)", ptgt->tgt_d_id, opcode,
5161 5169 (opcode == LA_ELS_PLOGI) ? "PLOGI" : "PRLI");
5162 5170
5163 5171 if (opcode == LA_ELS_PLOGI) {
5164 5172 cmd_len = sizeof (la_els_logi_t);
5165 5173 resp_len = sizeof (la_els_logi_t);
5166 5174 } else {
5167 5175 ASSERT(opcode == LA_ELS_PRLI);
5168 5176 cmd_len = sizeof (la_els_prli_t);
5169 5177 resp_len = sizeof (la_els_prli_t);
5170 5178 }
5171 5179
5172 5180 if (icmd == NULL) {
5173 5181 alloc = FCP_MAX(sizeof (la_els_logi_t),
5174 5182 sizeof (la_els_prli_t));
5175 5183 icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
5176 5184 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5177 5185 lcount, tcount, cause, FC_INVALID_RSCN_COUNT);
5178 5186 if (icmd == NULL) {
5179 5187 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_10);
5180 5188 return (res);
5181 5189 }
5182 5190 internal++;
5183 5191 }
5184 5192 fpkt = icmd->ipkt_fpkt;
5185 5193
5186 5194 fpkt->pkt_cmdlen = cmd_len;
5187 5195 fpkt->pkt_rsplen = resp_len;
5188 5196 fpkt->pkt_datalen = 0;
5189 5197 icmd->ipkt_retries = 0;
5190 5198
5191 5199 /* fill in fpkt info */
5192 5200 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5193 5201 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
5194 5202 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5195 5203
5196 5204 /* get ptr to frame hdr in fpkt */
5197 5205 hp = &fpkt->pkt_cmd_fhdr;
5198 5206
5199 5207 /*
5200 5208 * fill in frame hdr
5201 5209 */
5202 5210 hp->r_ctl = R_CTL_ELS_REQ;
5203 5211 hp->s_id = pptr->port_id; /* source ID */
5204 5212 hp->d_id = ptgt->tgt_d_id; /* dest ID */
5205 5213 hp->type = FC_TYPE_EXTENDED_LS;
5206 5214 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
5207 5215 hp->seq_id = 0;
5208 5216 hp->rsvd = 0;
5209 5217 hp->df_ctl = 0;
5210 5218 hp->seq_cnt = 0;
5211 5219 hp->ox_id = 0xffff; /* i.e. none */
5212 5220 hp->rx_id = 0xffff; /* i.e. none */
5213 5221 hp->ro = 0;
5214 5222
5215 5223 /*
5216 5224 * at this point we have a filled in cmd pkt
5217 5225 *
5218 5226 * fill in the respective info, then use the transport to send
5219 5227 * the packet
5220 5228 *
5221 5229 * for a PLOGI call fc_ulp_login(), and
5222 5230 * for a PRLI call fc_ulp_issue_els()
5223 5231 */
5224 5232 switch (opcode) {
5225 5233 case LA_ELS_PLOGI: {
5226 5234 struct la_els_logi logi;
5227 5235
5228 5236 bzero(&logi, sizeof (struct la_els_logi));
5229 5237
5230 5238 hp = &fpkt->pkt_cmd_fhdr;
5231 5239 hp->r_ctl = R_CTL_ELS_REQ;
5232 5240 logi.ls_code.ls_code = LA_ELS_PLOGI;
5233 5241 logi.ls_code.mbz = 0;
5234 5242
5235 5243 FCP_CP_OUT((uint8_t *)&logi, fpkt->pkt_cmd,
5236 5244 fpkt->pkt_cmd_acc, sizeof (struct la_els_logi));
5237 5245
5238 5246 icmd->ipkt_opcode = LA_ELS_PLOGI;
5239 5247
5240 5248 mutex_enter(&pptr->port_mutex);
5241 5249 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5242 5250
5243 5251 mutex_exit(&pptr->port_mutex);
5244 5252
5245 5253 rval = fc_ulp_login(pptr->port_fp_handle, &fpkt, 1);
5246 5254 if (rval == FC_SUCCESS) {
5247 5255 res = DDI_SUCCESS;
5248 5256 break;
5249 5257 }
5250 5258
5251 5259 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_11);
5252 5260
5253 5261 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5254 5262 rval, "PLOGI");
5255 5263 } else {
5256 5264 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5257 5265 fcp_trace, FCP_BUF_LEVEL_5, 0,
5258 5266 			    "fcp_send_els1: state change occurred"
5259 5267 " for D_ID=0x%x", ptgt->tgt_d_id);
5260 5268 mutex_exit(&pptr->port_mutex);
5261 5269 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_12);
5262 5270 }
5263 5271 break;
5264 5272 }
5265 5273
5266 5274 case LA_ELS_PRLI: {
5267 5275 struct la_els_prli prli;
5268 5276 struct fcp_prli *fprli;
5269 5277
5270 5278 bzero(&prli, sizeof (struct la_els_prli));
5271 5279
5272 5280 hp = &fpkt->pkt_cmd_fhdr;
5273 5281 hp->r_ctl = R_CTL_ELS_REQ;
5274 5282
5275 5283 /* fill in PRLI cmd ELS fields */
5276 5284 prli.ls_code = LA_ELS_PRLI;
5277 5285 		prli.page_length = 0x10; /* PRLI service parameter page is 16 bytes */
5278 5286 prli.payload_length = sizeof (struct la_els_prli);
5279 5287
5280 5288 icmd->ipkt_opcode = LA_ELS_PRLI;
5281 5289
5282 5290 /* get ptr to PRLI service params */
5283 5291 fprli = (struct fcp_prli *)prli.service_params;
5284 5292
5285 5293 /* fill in service params */
5286 5294 fprli->type = 0x08;
5287 5295 fprli->resvd1 = 0;
5288 5296 fprli->orig_process_assoc_valid = 0;
5289 5297 fprli->resp_process_assoc_valid = 0;
5290 5298 fprli->establish_image_pair = 1;
5291 5299 fprli->resvd2 = 0;
5292 5300 fprli->resvd3 = 0;
5293 5301 fprli->obsolete_1 = 0;
5294 5302 fprli->obsolete_2 = 0;
5295 5303 fprli->data_overlay_allowed = 0;
5296 5304 fprli->initiator_fn = 1;
5297 5305 fprli->confirmed_compl_allowed = 1;
5298 5306
5299 5307 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5300 5308 fprli->target_fn = 1;
5301 5309 } else {
5302 5310 fprli->target_fn = 0;
5303 5311 }
5304 5312
5305 5313 fprli->retry = 1;
5306 5314 fprli->read_xfer_rdy_disabled = 1;
5307 5315 fprli->write_xfer_rdy_disabled = 0;
5308 5316
5309 5317 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5310 5318 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5311 5319
5312 5320 /* issue the PRLI request */
5313 5321
5314 5322 mutex_enter(&pptr->port_mutex);
5315 5323 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
5316 5324
5317 5325 mutex_exit(&pptr->port_mutex);
5318 5326
5319 5327 rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt);
5320 5328 if (rval == FC_SUCCESS) {
5321 5329 res = DDI_SUCCESS;
5322 5330 break;
5323 5331 }
5324 5332
5325 5333 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_13);
5326 5334
5327 5335 res = fcp_handle_ipkt_errors(pptr, ptgt, icmd,
5328 5336 rval, "PRLI");
5329 5337 } else {
5330 5338 mutex_exit(&pptr->port_mutex);
5331 5339 FCP_TGT_TRACE(ptgt, tcount, FCP_TGT_TRACE_14);
5332 5340 }
5333 5341 break;
5334 5342 }
5335 5343
5336 5344 default:
5337 5345 fcp_log(CE_WARN, NULL, "!invalid ELS opcode=0x%x", opcode);
5338 5346 break;
5339 5347 }
5340 5348
5341 5349 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5342 5350 fcp_trace, FCP_BUF_LEVEL_5, 0,
5343 5351 "fcp_send_els: returning %d", res);
5344 5352
5345 5353 if (res != DDI_SUCCESS) {
5346 5354 if (internal) {
5347 5355 fcp_icmd_free(pptr, icmd);
5348 5356 }
5349 5357 }
5350 5358
5351 5359 return (res);
5352 5360 }
5353 5361
5354 5362
5355 5363 /*
5356 5364  * called internally to update the state of all of the tgts and each LUN
5357 5365 * for this port (i.e. each target known to be attached to this port)
5358 5366 * if they are not already offline
5359 5367 *
5360 5368 * must be called with the port mutex owned
5361 5369 *
5362 5370 * acquires and releases the target mutexes for each target attached
5363 5371 * to this port
5364 5372 */
5365 5373 void
5366 5374 fcp_update_state(struct fcp_port *pptr, uint32_t state, int cause)
5367 5375 {
5368 5376 int i;
5369 5377 struct fcp_tgt *ptgt;
5370 5378
5371 5379 ASSERT(mutex_owned(&pptr->port_mutex));
5372 5380
5373 5381 for (i = 0; i < FCP_NUM_HASH; i++) {
5374 5382 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5375 5383 ptgt = ptgt->tgt_next) {
5376 5384 mutex_enter(&ptgt->tgt_mutex);
5377 5385 fcp_update_tgt_state(ptgt, FCP_SET, state);
5378 5386 ptgt->tgt_change_cnt++;
5379 5387 ptgt->tgt_statec_cause = cause;
5380 5388 ptgt->tgt_tmp_cnt = 1;
5381 5389 ptgt->tgt_done = 0;
5382 5390 mutex_exit(&ptgt->tgt_mutex);
5383 5391 }
5384 5392 }
5385 5393 }
5386 5394
5387 5395
5388 5396 static void
5389 5397 fcp_offline_all(struct fcp_port *pptr, int lcount, int cause)
5390 5398 {
5391 5399 int i;
5392 5400 int ndevs;
5393 5401 struct fcp_tgt *ptgt;
5394 5402
5395 5403 ASSERT(mutex_owned(&pptr->port_mutex));
5396 5404
5397 5405 for (ndevs = 0, i = 0; i < FCP_NUM_HASH; i++) {
5398 5406 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5399 5407 ptgt = ptgt->tgt_next) {
5400 5408 ndevs++;
5401 5409 }
5402 5410 }
5403 5411
5404 5412 if (ndevs == 0) {
5405 5413 return;
5406 5414 }
5407 5415 pptr->port_tmp_cnt = ndevs;
5408 5416
5409 5417 for (i = 0; i < FCP_NUM_HASH; i++) {
5410 5418 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
5411 5419 ptgt = ptgt->tgt_next) {
5412 5420 (void) fcp_call_finish_init_held(pptr, ptgt,
5413 5421 lcount, ptgt->tgt_change_cnt, cause);
5414 5422 }
5415 5423 }
5416 5424 }
5417 5425
5418 5426 /*
5419 5427 * Function: fcp_update_tgt_state
5420 5428 *
5421 5429 * Description: This function updates the field tgt_state of a target. That
5422 5430  *		field is a bitmap whose bits can be set or reset
5423 5431 * individually. The action applied to the target state is also
5424 5432 * applied to all the LUNs belonging to the target (provided the
5425 5433 * LUN is not offline). A side effect of applying the state
5426 5434  *		modification to the target and the LUNs is that the tgt_trace field
5427 5435  *		of the target and the lun_trace field of the LUNs are set to zero.
5428 5436 *
5429 5437 *
5430 5438 * Argument: *ptgt Target structure.
5431 5439  *		flag		Flag indicating what action to apply (set/reset).
5432 5440 * state State bits to update.
5433 5441 *
5434 5442 * Return Value: None
5435 5443 *
5436 5444 * Context: Interrupt, Kernel or User context.
5437 5445 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5438 5446 * calling this function.
5439 5447 */
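/*
 * Illustrative sketch (not part of this change): marking a target and its
 * LUNs, then clearing the mark, mirrors the FCP_SET/FCP_RESET usage found
 * elsewhere in this file.
 *
 *	mutex_enter(&ptgt->tgt_mutex);
 *	fcp_update_tgt_state(ptgt, FCP_SET, FCP_TGT_BUSY | FCP_TGT_MARK);
 *	...
 *	fcp_update_tgt_state(ptgt, FCP_RESET, FCP_TGT_MARK);
 *	mutex_exit(&ptgt->tgt_mutex);
 */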
5440 5448 void
5441 5449 fcp_update_tgt_state(struct fcp_tgt *ptgt, int flag, uint32_t state)
5442 5450 {
5443 5451 struct fcp_lun *plun;
5444 5452
5445 5453 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5446 5454
5447 5455 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
5448 5456 /* The target is not offline. */
5449 5457 if (flag == FCP_SET) {
5450 5458 ptgt->tgt_state |= state;
5451 5459 ptgt->tgt_trace = 0;
5452 5460 } else {
5453 5461 ptgt->tgt_state &= ~state;
5454 5462 }
5455 5463
5456 5464 for (plun = ptgt->tgt_lun; plun != NULL;
5457 5465 plun = plun->lun_next) {
5458 5466 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
5459 5467 /* The LUN is not offline. */
5460 5468 if (flag == FCP_SET) {
5461 5469 plun->lun_state |= state;
5462 5470 plun->lun_trace = 0;
5463 5471 } else {
5464 5472 plun->lun_state &= ~state;
5465 5473 }
5466 5474 }
5467 5475 }
5468 5476 }
5469 5477 }
5470 5478
5471 5479 /*
5472 5480  * Function:	fcp_update_lun_state
5473 5481 *
5474 5482 * Description: This function updates the field lun_state of a LUN. That
5475 5483  *		field is a bitmap whose bits can be set or reset
5476 5484 * individually.
5477 5485 *
5478 5486 * Argument: *plun LUN structure.
5479 5487  *		flag		Flag indicating what action to apply (set/reset).
5480 5488 * state State bits to update.
5481 5489 *
5482 5490 * Return Value: None
5483 5491 *
5484 5492 * Context: Interrupt, Kernel or User context.
5485 5493 * The mutex of the target (ptgt->tgt_mutex) must be owned when
5486 5494 * calling this function.
5487 5495 */
5488 5496 void
5489 5497 fcp_update_lun_state(struct fcp_lun *plun, int flag, uint32_t state)
5490 5498 {
5491 5499 struct fcp_tgt *ptgt = plun->lun_tgt;
5492 5500
5493 5501 ASSERT(mutex_owned(&ptgt->tgt_mutex));
5494 5502
5495 5503 if (!(plun->lun_state & FCP_TGT_OFFLINE)) {
5496 5504 if (flag == FCP_SET) {
5497 5505 plun->lun_state |= state;
5498 5506 } else {
5499 5507 plun->lun_state &= ~state;
5500 5508 }
5501 5509 }
5502 5510 }
5503 5511
5504 5512 /*
5505 5513 * Function: fcp_get_port
5506 5514 *
5507 5515 * Description: This function returns the fcp_port structure from the opaque
5508 5516 * handle passed by the caller. That opaque handle is the handle
5509 5517 * used by fp/fctl to identify a particular local port. That
5510 5518 * handle has been stored in the corresponding fcp_port
5511 5519  *		structure. This function walks the global list of
5512 5520  *		fcp_port structures until one has a port_fp_handle that matches
5513 5521 * the handle passed by the caller. This function enters the
5514 5522 * mutex fcp_global_mutex while walking the global list and then
5515 5523 * releases it.
5516 5524 *
5517 5525 * Argument: port_handle Opaque handle that fp/fctl uses to identify a
5518 5526 * particular port.
5519 5527 *
5520 5528 * Return Value: NULL Not found.
5521 5529 * Not NULL Pointer to the fcp_port structure.
5522 5530 *
5523 5531 * Context: Interrupt, Kernel or User context.
5524 5532 */
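/*
 * Illustrative sketch (not part of this change): a ULP entry point that is
 * handed the opaque fp/fctl handle would typically resolve it first and
 * bail out when no matching port exists.
 *
 *	struct fcp_port	*pptr;
 *
 *	if ((pptr = fcp_get_port(port_handle)) == NULL) {
 *		no fcp_port matches this handle; fail the request
 *	}
 */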
5525 5533 static struct fcp_port *
5526 5534 fcp_get_port(opaque_t port_handle)
5527 5535 {
5528 5536 struct fcp_port *pptr;
5529 5537
5530 5538 ASSERT(port_handle != NULL);
5531 5539
5532 5540 mutex_enter(&fcp_global_mutex);
5533 5541 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
5534 5542 if (pptr->port_fp_handle == port_handle) {
5535 5543 break;
5536 5544 }
5537 5545 }
5538 5546 mutex_exit(&fcp_global_mutex);
5539 5547
5540 5548 return (pptr);
5541 5549 }
5542 5550
5543 5551
5544 5552 static void
5545 5553 fcp_unsol_callback(fc_packet_t *fpkt)
5546 5554 {
5547 5555 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
5548 5556 struct fcp_port *pptr = icmd->ipkt_port;
5549 5557
5550 5558 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
5551 5559 caddr_t state, reason, action, expln;
5552 5560
5553 5561 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
5554 5562 &action, &expln);
5555 5563
5556 5564 fcp_log(CE_WARN, pptr->port_dip,
5557 5565 "!couldn't post response to unsolicited request: "
5558 5566 		    "state=%s reason=%s ox_id=%x rx_id=%x",
5559 5567 state, reason, fpkt->pkt_cmd_fhdr.ox_id,
5560 5568 fpkt->pkt_cmd_fhdr.rx_id);
5561 5569 }
5562 5570 fcp_icmd_free(pptr, icmd);
5563 5571 }
5564 5572
5565 5573
5566 5574 /*
5567 5575 * Perform general purpose preparation of a response to an unsolicited request
5568 5576 */
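/*
 * Illustrative sketch (not part of this change): fcp_unsol_prli() below
 * uses this helper to aim an ELS accept back at the sender of the
 * unsolicited buffer.
 *
 *	fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
 */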
5569 5577 static void
5570 5578 fcp_unsol_resp_init(fc_packet_t *pkt, fc_unsol_buf_t *buf,
5571 5579 uchar_t r_ctl, uchar_t type)
5572 5580 {
5573 5581 pkt->pkt_cmd_fhdr.r_ctl = r_ctl;
5574 5582 pkt->pkt_cmd_fhdr.d_id = buf->ub_frame.s_id;
5575 5583 pkt->pkt_cmd_fhdr.s_id = buf->ub_frame.d_id;
5576 5584 pkt->pkt_cmd_fhdr.type = type;
5577 5585 pkt->pkt_cmd_fhdr.f_ctl = F_CTL_LAST_SEQ | F_CTL_XCHG_CONTEXT;
5578 5586 pkt->pkt_cmd_fhdr.seq_id = buf->ub_frame.seq_id;
5579 5587 pkt->pkt_cmd_fhdr.df_ctl = buf->ub_frame.df_ctl;
5580 5588 pkt->pkt_cmd_fhdr.seq_cnt = buf->ub_frame.seq_cnt;
5581 5589 pkt->pkt_cmd_fhdr.ox_id = buf->ub_frame.ox_id;
5582 5590 pkt->pkt_cmd_fhdr.rx_id = buf->ub_frame.rx_id;
5583 5591 pkt->pkt_cmd_fhdr.ro = 0;
5584 5592 pkt->pkt_cmd_fhdr.rsvd = 0;
5585 5593 pkt->pkt_comp = fcp_unsol_callback;
5586 5594 pkt->pkt_pd = NULL;
5587 5595 pkt->pkt_ub_resp_token = (opaque_t)buf;
5588 5596 }
5589 5597
5590 5598
5591 5599 /*ARGSUSED*/
5592 5600 static int
5593 5601 fcp_unsol_prli(struct fcp_port *pptr, fc_unsol_buf_t *buf)
5594 5602 {
5595 5603 fc_packet_t *fpkt;
5596 5604 struct la_els_prli prli;
5597 5605 struct fcp_prli *fprli;
5598 5606 struct fcp_ipkt *icmd;
5599 5607 struct la_els_prli *from;
5600 5608 struct fcp_prli *orig;
5601 5609 struct fcp_tgt *ptgt;
5602 5610 int tcount = 0;
5603 5611 int lcount;
5604 5612
5605 5613 from = (struct la_els_prli *)buf->ub_buffer;
5606 5614 orig = (struct fcp_prli *)from->service_params;
5607 5615 if ((ptgt = fcp_get_target_by_did(pptr, buf->ub_frame.s_id)) !=
5608 5616 NULL) {
5609 5617 mutex_enter(&ptgt->tgt_mutex);
5610 5618 tcount = ptgt->tgt_change_cnt;
5611 5619 mutex_exit(&ptgt->tgt_mutex);
5612 5620 }
5613 5621
5614 5622 mutex_enter(&pptr->port_mutex);
5615 5623 lcount = pptr->port_link_cnt;
5616 5624 mutex_exit(&pptr->port_mutex);
5617 5625
5618 5626 if ((icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (la_els_prli_t),
5619 5627 sizeof (la_els_prli_t), 0,
5620 5628 pptr->port_state & FCP_STATE_FCA_IS_NODMA,
5621 5629 lcount, tcount, 0, FC_INVALID_RSCN_COUNT)) == NULL) {
5622 5630 return (FC_FAILURE);
5623 5631 }
5624 5632
5625 5633 fpkt = icmd->ipkt_fpkt;
5626 5634 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
5627 5635 fpkt->pkt_tran_type = FC_PKT_OUTBOUND;
5628 5636 fpkt->pkt_timeout = FCP_ELS_TIMEOUT;
5629 5637 fpkt->pkt_cmdlen = sizeof (la_els_prli_t);
5630 5638 fpkt->pkt_rsplen = 0;
5631 5639 fpkt->pkt_datalen = 0;
5632 5640
5633 5641 icmd->ipkt_opcode = LA_ELS_PRLI;
5634 5642
5635 5643 bzero(&prli, sizeof (struct la_els_prli));
5636 5644 fprli = (struct fcp_prli *)prli.service_params;
5637 5645 prli.ls_code = LA_ELS_ACC;
5638 5646 prli.page_length = 0x10;
5639 5647 prli.payload_length = sizeof (struct la_els_prli);
5640 5648
5641 5649 /* fill in service params */
5642 5650 fprli->type = 0x08;
5643 5651 fprli->resvd1 = 0;
5644 5652 fprli->orig_process_assoc_valid = orig->orig_process_assoc_valid;
5645 5653 fprli->orig_process_associator = orig->orig_process_associator;
5646 5654 fprli->resp_process_assoc_valid = 0;
5647 5655 fprli->establish_image_pair = 1;
5648 5656 fprli->resvd2 = 0;
5649 5657 fprli->resvd3 = 0;
5650 5658 fprli->obsolete_1 = 0;
5651 5659 fprli->obsolete_2 = 0;
5652 5660 fprli->data_overlay_allowed = 0;
5653 5661 fprli->initiator_fn = 1;
5654 5662 fprli->confirmed_compl_allowed = 1;
5655 5663
5656 5664 if (fc_ulp_is_name_present("ltct") == FC_SUCCESS) {
5657 5665 fprli->target_fn = 1;
5658 5666 } else {
5659 5667 fprli->target_fn = 0;
5660 5668 }
5661 5669
5662 5670 fprli->retry = 1;
5663 5671 fprli->read_xfer_rdy_disabled = 1;
5664 5672 fprli->write_xfer_rdy_disabled = 0;
5665 5673
5666 5674 /* save the unsol prli payload first */
5667 5675 FCP_CP_OUT((uint8_t *)from, fpkt->pkt_resp,
5668 5676 fpkt->pkt_resp_acc, sizeof (struct la_els_prli));
5669 5677
5670 5678 FCP_CP_OUT((uint8_t *)&prli, fpkt->pkt_cmd,
5671 5679 fpkt->pkt_cmd_acc, sizeof (struct la_els_prli));
5672 5680
5673 5681 fcp_unsol_resp_init(fpkt, buf, R_CTL_ELS_RSP, FC_TYPE_EXTENDED_LS);
5674 5682
5675 5683 mutex_enter(&pptr->port_mutex);
5676 5684 if (!FCP_LINK_STATE_CHANGED(pptr, icmd)) {
5677 5685 int rval;
5678 5686 mutex_exit(&pptr->port_mutex);
5679 5687
5680 5688 if ((rval = fc_ulp_issue_els(pptr->port_fp_handle, fpkt)) !=
5681 5689 FC_SUCCESS) {
5682 5690 if ((rval == FC_STATEC_BUSY || rval == FC_OFFLINE) &&
5683 5691 ptgt != NULL) {
5684 5692 fcp_queue_ipkt(pptr, fpkt);
5685 5693 return (FC_SUCCESS);
5686 5694 }
5687 5695 /* Let it timeout */
5688 5696 fcp_icmd_free(pptr, icmd);
5689 5697 return (FC_FAILURE);
5690 5698 }
5691 5699 } else {
5692 5700 mutex_exit(&pptr->port_mutex);
5693 5701 fcp_icmd_free(pptr, icmd);
5694 5702 return (FC_FAILURE);
5695 5703 }
5696 5704
5697 5705 (void) fc_ulp_ubrelease(pptr->port_fp_handle, 1, &buf->ub_token);
5698 5706
5699 5707 return (FC_SUCCESS);
5700 5708 }
5701 5709
5702 5710 /*
5703 5711 * Function: fcp_icmd_alloc
5704 5712 *
5705 5713  * Description: This function allocates an fcp_ipkt structure. The pkt_comp
5706 5714 * field is initialized to fcp_icmd_callback. Sometimes it is
5707 5715 * modified by the caller (such as fcp_send_scsi). The
5708 5716 * structure is also tied to the state of the line and of the
5709 5717 * target at a particular time. That link is established by
5710 5718 * setting the fields ipkt_link_cnt and ipkt_change_cnt to lcount
5711 5719  *		and tcount, which come respectively from pptr->port_link_cnt and
5712 5720 * ptgt->tgt_change_cnt.
5713 5721 *
5714 5722 * Argument: *pptr Fcp port.
5715 5723 * *ptgt Target (destination of the command).
5716 5724 * cmd_len Length of the command.
5717 5725 * resp_len Length of the expected response.
5718 5726 * data_len Length of the data.
5719 5727  *		nodma		Indicates whether the command and response
5720 5728  *				will be transferred through DMA or not.
5721 5729 * lcount Link state change counter.
5722 5730 * tcount Target state change counter.
5723 5731  *		cause		Reason that led to this call.
5724 5732 *
5725 5733 * Return Value: NULL Failed.
5726 5734 * Not NULL Internal packet address.
5727 5735 */
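/*
 * Illustrative sketch (not part of this change; values assumed): allocating
 * an internal packet large enough for either ELS payload, as done when
 * kicking off PLOGI/PRLI, and releasing it if the send cannot be attempted.
 *
 *	alloc = FCP_MAX(sizeof (la_els_logi_t), sizeof (la_els_prli_t));
 *	icmd = fcp_icmd_alloc(pptr, ptgt, alloc, alloc, 0,
 *	    pptr->port_state & FCP_STATE_FCA_IS_NODMA, lcount, tcount,
 *	    cause, rscn_count);
 *	if (icmd == NULL) {
 *		allocation failed; back out
 *	} else if (the ELS cannot be sent) {
 *		fcp_icmd_free(pptr, icmd);
 *	}
 */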
5728 5736 static struct fcp_ipkt *
5729 5737 fcp_icmd_alloc(struct fcp_port *pptr, struct fcp_tgt *ptgt, int cmd_len,
5730 5738 int resp_len, int data_len, int nodma, int lcount, int tcount, int cause,
5731 5739 uint32_t rscn_count)
5732 5740 {
5733 5741 int dma_setup = 0;
5734 5742 fc_packet_t *fpkt;
5735 5743 struct fcp_ipkt *icmd = NULL;
5736 5744
5737 5745 icmd = kmem_zalloc(sizeof (struct fcp_ipkt) +
5738 5746 pptr->port_dmacookie_sz + pptr->port_priv_pkt_len,
5739 5747 KM_NOSLEEP);
5740 5748 if (icmd == NULL) {
5741 5749 fcp_log(CE_WARN, pptr->port_dip,
5742 5750 "!internal packet allocation failed");
5743 5751 return (NULL);
5744 5752 }
5745 5753
5746 5754 /*
5747 5755 * initialize the allocated packet
5748 5756 */
5749 5757 icmd->ipkt_nodma = nodma;
5750 5758 icmd->ipkt_next = icmd->ipkt_prev = NULL;
5751 5759 icmd->ipkt_lun = NULL;
5752 5760
5753 5761 icmd->ipkt_link_cnt = lcount;
5754 5762 icmd->ipkt_change_cnt = tcount;
5755 5763 icmd->ipkt_cause = cause;
5756 5764
5757 5765 mutex_enter(&pptr->port_mutex);
5758 5766 icmd->ipkt_port = pptr;
5759 5767 mutex_exit(&pptr->port_mutex);
5760 5768
5761 5769 /* keep track of amt of data to be sent in pkt */
5762 5770 icmd->ipkt_cmdlen = cmd_len;
5763 5771 icmd->ipkt_resplen = resp_len;
5764 5772 icmd->ipkt_datalen = data_len;
5765 5773
5766 5774 /* set up pkt's ptr to the fc_packet_t struct, just after the ipkt */
5767 5775 icmd->ipkt_fpkt = (fc_packet_t *)(&icmd->ipkt_fc_packet);
5768 5776
5769 5777 /* set pkt's private ptr to point to cmd pkt */
5770 5778 icmd->ipkt_fpkt->pkt_ulp_private = (opaque_t)icmd;
5771 5779
5772 5780 /* set FCA private ptr to memory just beyond */
5773 5781 icmd->ipkt_fpkt->pkt_fca_private = (opaque_t)
5774 5782 ((char *)icmd + sizeof (struct fcp_ipkt) +
5775 5783 pptr->port_dmacookie_sz);
5776 5784
5777 5785 /* get ptr to fpkt substruct and fill it in */
5778 5786 fpkt = icmd->ipkt_fpkt;
5779 5787 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)icmd +
5780 5788 sizeof (struct fcp_ipkt));
5781 5789
5782 5790 if (ptgt != NULL) {
5783 5791 icmd->ipkt_tgt = ptgt;
5784 5792 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
5785 5793 }
5786 5794
5787 5795 fpkt->pkt_comp = fcp_icmd_callback;
5788 5796 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
5789 5797 fpkt->pkt_cmdlen = cmd_len;
5790 5798 fpkt->pkt_rsplen = resp_len;
5791 5799 fpkt->pkt_datalen = data_len;
5792 5800
5793 5801 /*
5794 5802 * The pkt_ulp_rscn_infop (aka pkt_ulp_rsvd1) field is used to pass the
5795 5803 	 * rscn_count, as fcp knows it, down to the transport. If a valid count was
5796 5804 * passed into this function, we allocate memory to actually pass down
5797 5805 * this info.
5798 5806 *
5799 5807 * BTW, if the kmem_zalloc fails, we won't try too hard. This will
5800 5808 * basically mean that fcp will not be able to help transport
5801 5809 * distinguish if a new RSCN has come after fcp was last informed about
5802 5810 * it. In such cases, it might lead to the problem mentioned in CR/bug #
5803 5811 * 5068068 where the device might end up going offline in case of RSCN
5804 5812 * storms.
5805 5813 */
5806 5814 fpkt->pkt_ulp_rscn_infop = NULL;
5807 5815 if (rscn_count != FC_INVALID_RSCN_COUNT) {
5808 5816 fpkt->pkt_ulp_rscn_infop = kmem_zalloc(
5809 5817 sizeof (fc_ulp_rscn_info_t), KM_NOSLEEP);
5810 5818 if (fpkt->pkt_ulp_rscn_infop == NULL) {
5811 5819 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5812 5820 fcp_trace, FCP_BUF_LEVEL_6, 0,
5813 5821 "Failed to alloc memory to pass rscn info");
5814 5822 }
5815 5823 }
5816 5824
5817 5825 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5818 5826 fc_ulp_rscn_info_t *rscnp;
5819 5827
5820 5828 rscnp = (fc_ulp_rscn_info_t *)fpkt->pkt_ulp_rscn_infop;
5821 5829 rscnp->ulp_rscn_count = rscn_count;
5822 5830 }
5823 5831
5824 5832 if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
5825 5833 goto fail;
5826 5834 }
5827 5835 dma_setup++;
5828 5836
5829 5837 /*
5830 5838 * Must hold target mutex across setting of pkt_pd and call to
5831 5839 * fc_ulp_init_packet to ensure the handle to the target doesn't go
5832 5840 * away while we're not looking.
5833 5841 */
5834 5842 if (ptgt != NULL) {
5835 5843 mutex_enter(&ptgt->tgt_mutex);
5836 5844 fpkt->pkt_pd = ptgt->tgt_pd_handle;
5837 5845
5838 5846 /* ask transport to do its initialization on this pkt */
5839 5847 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5840 5848 != FC_SUCCESS) {
5841 5849 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5842 5850 fcp_trace, FCP_BUF_LEVEL_6, 0,
5843 5851 "fc_ulp_init_packet failed");
5844 5852 mutex_exit(&ptgt->tgt_mutex);
5845 5853 goto fail;
5846 5854 }
5847 5855 mutex_exit(&ptgt->tgt_mutex);
5848 5856 } else {
5849 5857 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, KM_NOSLEEP)
5850 5858 != FC_SUCCESS) {
5851 5859 FCP_TRACE(fcp_logq, pptr->port_instbuf,
5852 5860 fcp_trace, FCP_BUF_LEVEL_6, 0,
5853 5861 "fc_ulp_init_packet failed");
5854 5862 goto fail;
5855 5863 }
5856 5864 }
5857 5865
5858 5866 mutex_enter(&pptr->port_mutex);
5859 5867 if (pptr->port_state & (FCP_STATE_DETACHING |
5860 5868 FCP_STATE_SUSPENDED | FCP_STATE_POWER_DOWN)) {
5861 5869 int rval;
5862 5870
5863 5871 mutex_exit(&pptr->port_mutex);
5864 5872
5865 5873 rval = fc_ulp_uninit_packet(pptr->port_fp_handle, fpkt);
5866 5874 ASSERT(rval == FC_SUCCESS);
5867 5875
5868 5876 goto fail;
5869 5877 }
5870 5878
5871 5879 if (ptgt != NULL) {
5872 5880 mutex_enter(&ptgt->tgt_mutex);
5873 5881 ptgt->tgt_ipkt_cnt++;
5874 5882 mutex_exit(&ptgt->tgt_mutex);
5875 5883 }
5876 5884
5877 5885 pptr->port_ipkt_cnt++;
5878 5886
5879 5887 mutex_exit(&pptr->port_mutex);
5880 5888
5881 5889 return (icmd);
5882 5890
5883 5891 fail:
5884 5892 if (fpkt->pkt_ulp_rscn_infop != NULL) {
5885 5893 kmem_free(fpkt->pkt_ulp_rscn_infop,
5886 5894 sizeof (fc_ulp_rscn_info_t));
5887 5895 fpkt->pkt_ulp_rscn_infop = NULL;
5888 5896 }
5889 5897
5890 5898 if (dma_setup) {
5891 5899 fcp_free_dma(pptr, icmd);
5892 5900 }
5893 5901 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5894 5902 (size_t)pptr->port_dmacookie_sz);
5895 5903
5896 5904 return (NULL);
5897 5905 }
5898 5906
5899 5907 /*
5900 5908 * Function: fcp_icmd_free
5901 5909 *
5902 5910 * Description: Frees the internal command passed by the caller.
5903 5911 *
5904 5912 * Argument: *pptr Fcp port.
5905 5913 * *icmd Internal packet to free.
5906 5914 *
5907 5915 * Return Value: None
5908 5916 */
5909 5917 static void
5910 5918 fcp_icmd_free(struct fcp_port *pptr, struct fcp_ipkt *icmd)
5911 5919 {
5912 5920 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
5913 5921
5914 5922 /* Let the underlying layers do their cleanup. */
5915 5923 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
5916 5924 icmd->ipkt_fpkt);
5917 5925
5918 5926 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop) {
5919 5927 kmem_free(icmd->ipkt_fpkt->pkt_ulp_rscn_infop,
5920 5928 sizeof (fc_ulp_rscn_info_t));
5921 5929 }
5922 5930
5923 5931 fcp_free_dma(pptr, icmd);
5924 5932
5925 5933 kmem_free(icmd, sizeof (struct fcp_ipkt) + pptr->port_priv_pkt_len +
5926 5934 (size_t)pptr->port_dmacookie_sz);
5927 5935
5928 5936 mutex_enter(&pptr->port_mutex);
5929 5937
5930 5938 if (ptgt) {
5931 5939 mutex_enter(&ptgt->tgt_mutex);
5932 5940 ptgt->tgt_ipkt_cnt--;
5933 5941 mutex_exit(&ptgt->tgt_mutex);
5934 5942 }
5935 5943
5936 5944 pptr->port_ipkt_cnt--;
5937 5945 mutex_exit(&pptr->port_mutex);
5938 5946 }
5939 5947
5940 5948 /*
5941 5949 * Function: fcp_alloc_dma
5942 5950 *
5943 5951  * Description: Allocates the DMA resources required for the internal
5944 5952 * packet.
5945 5953 *
5946 5954 * Argument: *pptr FCP port.
5947 5955 * *icmd Internal FCP packet.
5948 5956 * nodma Indicates if the Cmd and Resp will be DMAed.
5949 5957 * flags Allocation flags (Sleep or NoSleep).
5950 5958 *
5951 5959 * Return Value: FC_SUCCESS
5952 5960 * FC_NOMEM
5953 5961 */
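/*
 * Illustrative sketch (not part of this change): fcp_icmd_alloc() above
 * pairs this routine with fcp_free_dma() on its failure path; nodma selects
 * kmem buffers instead of DMA-able cmd/resp memory.
 *
 *	if (fcp_alloc_dma(pptr, icmd, nodma, KM_NOSLEEP) != FC_SUCCESS) {
 *		goto fail;
 *	}
 *	...
 * fail:
 *	fcp_free_dma(pptr, icmd);
 */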
5954 5962 static int
5955 5963 fcp_alloc_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd,
5956 5964 int nodma, int flags)
5957 5965 {
5958 5966 int rval;
5959 5967 size_t real_size;
5960 5968 uint_t ccount;
5961 5969 int bound = 0;
5962 5970 int cmd_resp = 0;
5963 5971 fc_packet_t *fpkt;
5964 5972 ddi_dma_cookie_t pkt_data_cookie;
5965 5973 ddi_dma_cookie_t *cp;
5966 5974 uint32_t cnt;
5967 5975
5968 5976 fpkt = &icmd->ipkt_fc_packet;
5969 5977
5970 5978 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_data_dma == NULL &&
5971 5979 fpkt->pkt_resp_dma == NULL);
5972 5980
5973 5981 icmd->ipkt_nodma = nodma;
5974 5982
5975 5983 if (nodma) {
5976 5984 fpkt->pkt_cmd = kmem_zalloc(fpkt->pkt_cmdlen, flags);
5977 5985 if (fpkt->pkt_cmd == NULL) {
5978 5986 goto fail;
5979 5987 }
5980 5988
5981 5989 fpkt->pkt_resp = kmem_zalloc(fpkt->pkt_rsplen, flags);
5982 5990 if (fpkt->pkt_resp == NULL) {
5983 5991 goto fail;
5984 5992 }
5985 5993 } else {
5986 5994 ASSERT(fpkt->pkt_cmdlen && fpkt->pkt_rsplen);
5987 5995
5988 5996 rval = fcp_alloc_cmd_resp(pptr, fpkt, flags);
5989 5997 if (rval == FC_FAILURE) {
5990 5998 ASSERT(fpkt->pkt_cmd_dma == NULL &&
5991 5999 fpkt->pkt_resp_dma == NULL);
5992 6000 goto fail;
5993 6001 }
5994 6002 cmd_resp++;
5995 6003 }
5996 6004
5997 6005 if ((fpkt->pkt_datalen != 0) &&
5998 6006 !(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
5999 6007 /*
6000 6008 * set up DMA handle and memory for the data in this packet
6001 6009 */
6002 6010 if (ddi_dma_alloc_handle(pptr->port_dip,
6003 6011 &pptr->port_data_dma_attr, DDI_DMA_DONTWAIT,
6004 6012 NULL, &fpkt->pkt_data_dma) != DDI_SUCCESS) {
6005 6013 goto fail;
6006 6014 }
6007 6015
6008 6016 if (ddi_dma_mem_alloc(fpkt->pkt_data_dma, fpkt->pkt_datalen,
6009 6017 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT,
6010 6018 DDI_DMA_DONTWAIT, NULL, &fpkt->pkt_data,
6011 6019 &real_size, &fpkt->pkt_data_acc) != DDI_SUCCESS) {
6012 6020 goto fail;
6013 6021 }
6014 6022
6015 6023 		/* was the DMA memory obtained smaller than the size asked for/needed? */
6016 6024 if (real_size < fpkt->pkt_datalen) {
6017 6025 goto fail;
6018 6026 }
6019 6027
6020 6028 /* bind DMA address and handle together */
6021 6029 if (ddi_dma_addr_bind_handle(fpkt->pkt_data_dma,
6022 6030 NULL, fpkt->pkt_data, real_size, DDI_DMA_READ |
6023 6031 DDI_DMA_CONSISTENT, DDI_DMA_DONTWAIT, NULL,
6024 6032 &pkt_data_cookie, &ccount) != DDI_DMA_MAPPED) {
6025 6033 goto fail;
6026 6034 }
6027 6035 bound++;
6028 6036
6029 6037 if (ccount > pptr->port_data_dma_attr.dma_attr_sgllen) {
6030 6038 goto fail;
6031 6039 }
6032 6040
6033 6041 fpkt->pkt_data_cookie_cnt = ccount;
6034 6042
6035 6043 cp = fpkt->pkt_data_cookie;
6036 6044 *cp = pkt_data_cookie;
6037 6045 cp++;
6038 6046
6039 6047 for (cnt = 1; cnt < ccount; cnt++, cp++) {
6040 6048 ddi_dma_nextcookie(fpkt->pkt_data_dma,
6041 6049 &pkt_data_cookie);
6042 6050 *cp = pkt_data_cookie;
6043 6051 }
6044 6052
6045 6053 } else if (fpkt->pkt_datalen != 0) {
6046 6054 /*
6047 6055 * If it's a pseudo FCA, then it can't support DMA even in
6048 6056 * SCSI data phase.
6049 6057 */
6050 6058 fpkt->pkt_data = kmem_alloc(fpkt->pkt_datalen, flags);
6051 6059 if (fpkt->pkt_data == NULL) {
6052 6060 goto fail;
6053 6061 }
6054 6062
6055 6063 }
6056 6064
6057 6065 return (FC_SUCCESS);
6058 6066
6059 6067 fail:
6060 6068 if (bound) {
6061 6069 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6062 6070 }
6063 6071
6064 6072 if (fpkt->pkt_data_dma) {
6065 6073 if (fpkt->pkt_data) {
6066 6074 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6067 6075 }
6068 6076 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6069 6077 } else {
6070 6078 if (fpkt->pkt_data) {
6071 6079 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6072 6080 }
6073 6081 }
6074 6082
6075 6083 if (nodma) {
6076 6084 if (fpkt->pkt_cmd) {
6077 6085 kmem_free(fpkt->pkt_cmd, fpkt->pkt_cmdlen);
6078 6086 }
6079 6087 if (fpkt->pkt_resp) {
6080 6088 kmem_free(fpkt->pkt_resp, fpkt->pkt_rsplen);
6081 6089 }
6082 6090 } else {
6083 6091 if (cmd_resp) {
6084 6092 fcp_free_cmd_resp(pptr, fpkt);
6085 6093 }
6086 6094 }
6087 6095
6088 6096 return (FC_NOMEM);
6089 6097 }
6090 6098
6091 6099
6092 6100 static void
6093 6101 fcp_free_dma(struct fcp_port *pptr, struct fcp_ipkt *icmd)
6094 6102 {
6095 6103 fc_packet_t *fpkt = icmd->ipkt_fpkt;
6096 6104
6097 6105 if (fpkt->pkt_data_dma) {
6098 6106 (void) ddi_dma_unbind_handle(fpkt->pkt_data_dma);
6099 6107 if (fpkt->pkt_data) {
6100 6108 ddi_dma_mem_free(&fpkt->pkt_data_acc);
6101 6109 }
6102 6110 ddi_dma_free_handle(&fpkt->pkt_data_dma);
6103 6111 } else {
6104 6112 if (fpkt->pkt_data) {
6105 6113 kmem_free(fpkt->pkt_data, fpkt->pkt_datalen);
6106 6114 }
6107 6115 /*
6108 6116 * Need we reset pkt_* to zero???
6109 6117 */
6110 6118 }
6111 6119
6112 6120 if (icmd->ipkt_nodma) {
6113 6121 if (fpkt->pkt_cmd) {
6114 6122 kmem_free(fpkt->pkt_cmd, icmd->ipkt_cmdlen);
6115 6123 }
6116 6124 if (fpkt->pkt_resp) {
6117 6125 kmem_free(fpkt->pkt_resp, icmd->ipkt_resplen);
6118 6126 }
6119 6127 } else {
6120 6128 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
6121 6129
6122 6130 fcp_free_cmd_resp(pptr, fpkt);
6123 6131 }
6124 6132 }
6125 6133
6126 6134 /*
6127 6135 * Function: fcp_lookup_target
6128 6136 *
6129 6137 * Description: Finds a target given a WWN.
6130 6138 *
6131 6139 * Argument: *pptr FCP port.
6132 6140 * *wwn World Wide Name of the device to look for.
6133 6141 *
6134 6142 * Return Value: NULL No target found
6135 6143 * Not NULL Target structure
6136 6144 *
6137 6145 * Context: Interrupt context.
6138 6146 * The mutex pptr->port_mutex must be owned.
6139 6147 */
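/*
 * Illustrative sketch (not part of this change): fcp_get_target_by_did()
 * below resolves a D_ID to a port WWN and then performs this lookup while
 * holding port_mutex.
 *
 *	mutex_enter(&pptr->port_mutex);
 *	ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
 *	mutex_exit(&pptr->port_mutex);
 */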
6140 6148 /* ARGSUSED */
6141 6149 static struct fcp_tgt *
6142 6150 fcp_lookup_target(struct fcp_port *pptr, uchar_t *wwn)
6143 6151 {
6144 6152 int hash;
6145 6153 struct fcp_tgt *ptgt;
6146 6154
6147 6155 ASSERT(mutex_owned(&pptr->port_mutex));
6148 6156
6149 6157 hash = FCP_HASH(wwn);
6150 6158
6151 6159 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
6152 6160 ptgt = ptgt->tgt_next) {
6153 6161 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN) &&
6154 6162 bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
6155 6163 sizeof (ptgt->tgt_port_wwn)) == 0) {
6156 6164 break;
6157 6165 }
6158 6166 }
6159 6167
6160 6168 return (ptgt);
6161 6169 }
6162 6170
6163 6171
6164 6172 /*
6165 6173 * Find target structure given a port identifier
6166 6174 */
6167 6175 static struct fcp_tgt *
6168 6176 fcp_get_target_by_did(struct fcp_port *pptr, uint32_t d_id)
6169 6177 {
6170 6178 fc_portid_t port_id;
6171 6179 la_wwn_t pwwn;
6172 6180 struct fcp_tgt *ptgt = NULL;
6173 6181
6174 6182 port_id.priv_lilp_posit = 0;
6175 6183 port_id.port_id = d_id;
6176 6184 if (fc_ulp_get_pwwn_by_did(pptr->port_fp_handle, port_id,
6177 6185 &pwwn) == FC_SUCCESS) {
6178 6186 mutex_enter(&pptr->port_mutex);
6179 6187 ptgt = fcp_lookup_target(pptr, pwwn.raw_wwn);
6180 6188 mutex_exit(&pptr->port_mutex);
6181 6189 }
6182 6190
6183 6191 return (ptgt);
6184 6192 }
6185 6193
6186 6194
6187 6195 /*
6188 6196 * the packet completion callback routine for info cmd pkts
6189 6197 *
6190 6198  * this means fpkt points to a response to either a PLOGI or a PRLI
6191 6199 *
6192 6200 * if there is an error an attempt is made to call a routine to resend
6193 6201 * the command that failed
6194 6202 */
6195 6203 static void
6196 6204 fcp_icmd_callback(fc_packet_t *fpkt)
6197 6205 {
6198 6206 struct fcp_ipkt *icmd;
6199 6207 struct fcp_port *pptr;
6200 6208 struct fcp_tgt *ptgt;
6201 6209 struct la_els_prli *prli;
6202 6210 struct la_els_prli prli_s;
6203 6211 struct fcp_prli *fprli;
6204 6212 struct fcp_lun *plun;
6205 6213 int free_pkt = 1;
6206 6214 int rval;
6207 6215 ls_code_t resp;
6208 6216 uchar_t prli_acc = 0;
6209 6217 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
6210 6218 int lun0_newalloc;
6211 6219
6212 6220 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
6213 6221
6214 6222 /* get ptrs to the port and target structs for the cmd */
6215 6223 pptr = icmd->ipkt_port;
6216 6224 ptgt = icmd->ipkt_tgt;
6217 6225
6218 6226 FCP_CP_IN(fpkt->pkt_resp, &resp, fpkt->pkt_resp_acc, sizeof (resp));
6219 6227
6220 6228 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
6221 6229 FCP_CP_IN(fpkt->pkt_cmd, &prli_s, fpkt->pkt_cmd_acc,
6222 6230 sizeof (prli_s));
6223 6231 prli_acc = (prli_s.ls_code == LA_ELS_ACC);
6224 6232 }
6225 6233
6226 6234 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6227 6235 fcp_trace, FCP_BUF_LEVEL_2, 0,
6228 6236 "ELS (%x) callback state=0x%x reason=0x%x for %x",
6229 6237 icmd->ipkt_opcode, fpkt->pkt_state, fpkt->pkt_reason,
6230 6238 ptgt->tgt_d_id);
6231 6239
6232 6240 if ((fpkt->pkt_state == FC_PKT_SUCCESS) &&
6233 6241 ((resp.ls_code == LA_ELS_ACC) || prli_acc)) {
6234 6242
6235 6243 mutex_enter(&ptgt->tgt_mutex);
6236 6244 if (ptgt->tgt_pd_handle == NULL) {
6237 6245 /*
6238 6246 * in a fabric environment the port device handles
6239 6247 * get created only after successful LOGIN into the
6240 6248 * transport, so the transport makes this port
6241 6249 * device (pd) handle available in this packet, so
6242 6250 * save it now
6243 6251 */
6244 6252 ASSERT(fpkt->pkt_pd != NULL);
6245 6253 ptgt->tgt_pd_handle = fpkt->pkt_pd;
6246 6254 }
6247 6255 mutex_exit(&ptgt->tgt_mutex);
6248 6256
6249 6257 /* which ELS cmd is this response for ?? */
6250 6258 switch (icmd->ipkt_opcode) {
6251 6259 case LA_ELS_PLOGI:
6252 6260 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6253 6261 fcp_trace, FCP_BUF_LEVEL_5, 0,
6254 6262 "PLOGI to d_id=0x%x succeeded, wwn=%08x%08x",
6255 6263 ptgt->tgt_d_id,
6256 6264 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
6257 6265 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]));
6258 6266
6259 6267 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6260 6268 FCP_TGT_TRACE_15);
6261 6269
6262 6270 /* Note that we are not allocating a new icmd */
6263 6271 if (fcp_send_els(pptr, ptgt, icmd, LA_ELS_PRLI,
6264 6272 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6265 6273 icmd->ipkt_cause) != DDI_SUCCESS) {
6266 6274 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6267 6275 FCP_TGT_TRACE_16);
6268 6276 goto fail;
6269 6277 }
6270 6278 break;
6271 6279
6272 6280 case LA_ELS_PRLI:
6273 6281 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6274 6282 fcp_trace, FCP_BUF_LEVEL_5, 0,
6275 6283 "PRLI to d_id=0x%x succeeded", ptgt->tgt_d_id);
6276 6284
6277 6285 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6278 6286 FCP_TGT_TRACE_17);
6279 6287
6280 6288 prli = &prli_s;
6281 6289
6282 6290 FCP_CP_IN(fpkt->pkt_resp, prli, fpkt->pkt_resp_acc,
6283 6291 sizeof (prli_s));
6284 6292
6285 6293 fprli = (struct fcp_prli *)prli->service_params;
6286 6294
6287 6295 mutex_enter(&ptgt->tgt_mutex);
6288 6296 ptgt->tgt_icap = fprli->initiator_fn;
6289 6297 ptgt->tgt_tcap = fprli->target_fn;
6290 6298 mutex_exit(&ptgt->tgt_mutex);
6291 6299
6292 6300 if ((fprli->type != 0x08) || (fprli->target_fn != 1)) {
6293 6301 /*
6294 6302 * this FCP device does not support target mode
6295 6303 */
6296 6304 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6297 6305 FCP_TGT_TRACE_18);
6298 6306 goto fail;
6299 6307 }
6300 6308 if (fprli->retry == 1) {
6301 6309 fc_ulp_disable_relogin(pptr->port_fp_handle,
6302 6310 &ptgt->tgt_port_wwn);
6303 6311 }
6304 6312
6305 6313 /* target is no longer offline */
6306 6314 mutex_enter(&pptr->port_mutex);
6307 6315 mutex_enter(&ptgt->tgt_mutex);
6308 6316 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6309 6317 ptgt->tgt_state &= ~(FCP_TGT_OFFLINE |
6310 6318 FCP_TGT_MARK);
6311 6319 } else {
6312 6320 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6313 6321 fcp_trace, FCP_BUF_LEVEL_2, 0,
6314 6322 "fcp_icmd_callback,1: state change "
6315 6323 				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6316 6324 mutex_exit(&ptgt->tgt_mutex);
6317 6325 mutex_exit(&pptr->port_mutex);
6318 6326 goto fail;
6319 6327 }
6320 6328 mutex_exit(&ptgt->tgt_mutex);
6321 6329 mutex_exit(&pptr->port_mutex);
6322 6330
6323 6331 /*
6324 6332 * lun 0 should always respond to inquiry, so
6325 6333 * get the LUN struct for LUN 0
6326 6334 *
6327 6335 * Currently we deal with first level of addressing.
6328 6336 * If / when we start supporting 0x device types
6329 6337 * (DTYPE_ARRAY_CTRL, i.e. array controllers)
6330 6338 * this logic will need revisiting.
6331 6339 */
6332 6340 lun0_newalloc = 0;
6333 6341 if ((plun = fcp_get_lun(ptgt, 0)) == NULL) {
6334 6342 /*
6335 6343 * no LUN struct for LUN 0 yet exists,
6336 6344 * so create one
6337 6345 */
6338 6346 plun = fcp_alloc_lun(ptgt);
6339 6347 if (plun == NULL) {
6340 6348 fcp_log(CE_WARN, pptr->port_dip,
6341 6349 "!Failed to allocate lun 0 for"
6342 6350 " D_ID=%x", ptgt->tgt_d_id);
6343 6351 goto fail;
6344 6352 }
6345 6353 lun0_newalloc = 1;
6346 6354 }
6347 6355
6348 6356 /* fill in LUN info */
6349 6357 mutex_enter(&ptgt->tgt_mutex);
6350 6358 /*
6351 6359 * consider lun 0 as device not connected if it is
6352 6360 * offlined or newly allocated
6353 6361 */
6354 6362 if ((plun->lun_state & FCP_LUN_OFFLINE) ||
6355 6363 lun0_newalloc) {
6356 6364 plun->lun_state |= FCP_LUN_DEVICE_NOT_CONNECTED;
6357 6365 }
6358 6366 plun->lun_state |= (FCP_LUN_BUSY | FCP_LUN_MARK);
6359 6367 plun->lun_state &= ~FCP_LUN_OFFLINE;
6360 6368 ptgt->tgt_lun_cnt = 1;
6361 6369 ptgt->tgt_report_lun_cnt = 0;
6362 6370 mutex_exit(&ptgt->tgt_mutex);
6363 6371
6364 6372 /* Retrieve the rscn count (if a valid one exists) */
6365 6373 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
6366 6374 rscn_count = ((fc_ulp_rscn_info_t *)
6367 6375 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))
6368 6376 ->ulp_rscn_count;
6369 6377 } else {
6370 6378 rscn_count = FC_INVALID_RSCN_COUNT;
6371 6379 }
6372 6380
6373 6381 /* send Report Lun request to target */
6374 6382 if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
6375 6383 sizeof (struct fcp_reportlun_resp),
6376 6384 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
6377 6385 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
6378 6386 mutex_enter(&pptr->port_mutex);
6379 6387 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6380 6388 fcp_log(CE_WARN, pptr->port_dip,
6381 6389 "!Failed to send REPORT LUN to"
6382 6390 " D_ID=%x", ptgt->tgt_d_id);
6383 6391 } else {
6384 6392 FCP_TRACE(fcp_logq,
6385 6393 pptr->port_instbuf, fcp_trace,
6386 6394 FCP_BUF_LEVEL_5, 0,
6387 6395 					    "fcp_icmd_callback,2: state change"
6388 6396 					    " occurred for D_ID=0x%x",
6389 6397 ptgt->tgt_d_id);
6390 6398 }
6391 6399 mutex_exit(&pptr->port_mutex);
6392 6400
6393 6401 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6394 6402 FCP_TGT_TRACE_19);
6395 6403
6396 6404 goto fail;
6397 6405 } else {
6398 6406 free_pkt = 0;
6399 6407 fcp_icmd_free(pptr, icmd);
6400 6408 }
6401 6409 break;
6402 6410
6403 6411 default:
6404 6412 fcp_log(CE_WARN, pptr->port_dip,
6405 6413 "!fcp_icmd_callback Invalid opcode");
6406 6414 goto fail;
6407 6415 }
6408 6416
6409 6417 return;
6410 6418 }
6411 6419
6412 6420
6413 6421 /*
6414 6422 * Other PLOGI failures are not retried as the
6415 6423 * transport does it already
6416 6424 */
6417 6425 if (icmd->ipkt_opcode != LA_ELS_PLOGI) {
6418 6426 if (fcp_is_retryable(icmd) &&
6419 6427 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6420 6428
6421 6429 if (FCP_MUST_RETRY(fpkt)) {
6422 6430 fcp_queue_ipkt(pptr, fpkt);
6423 6431 return;
6424 6432 }
6425 6433
6426 6434 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6427 6435 fcp_trace, FCP_BUF_LEVEL_2, 0,
6428 6436 "ELS PRLI is retried for d_id=0x%x, state=%x,"
6429 6437 " reason= %x", ptgt->tgt_d_id, fpkt->pkt_state,
6430 6438 fpkt->pkt_reason);
6431 6439
6432 6440 /*
6433 6441 * Retry by recalling the routine that
6434 6442 * originally queued this packet
6435 6443 */
6436 6444 mutex_enter(&pptr->port_mutex);
6437 6445 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6438 6446 caddr_t msg;
6439 6447
6440 6448 mutex_exit(&pptr->port_mutex);
6441 6449
6442 6450 ASSERT(icmd->ipkt_opcode != LA_ELS_PLOGI);
6443 6451
6444 6452 if (fpkt->pkt_state == FC_PKT_TIMEOUT) {
6445 6453 fpkt->pkt_timeout +=
6446 6454 FCP_TIMEOUT_DELTA;
6447 6455 }
6448 6456
6449 6457 rval = fc_ulp_issue_els(pptr->port_fp_handle,
6450 6458 fpkt);
6451 6459 if (rval == FC_SUCCESS) {
6452 6460 return;
6453 6461 }
6454 6462
6455 6463 if (rval == FC_STATEC_BUSY ||
6456 6464 rval == FC_OFFLINE) {
6457 6465 fcp_queue_ipkt(pptr, fpkt);
6458 6466 return;
6459 6467 }
6460 6468 (void) fc_ulp_error(rval, &msg);
6461 6469
6462 6470 fcp_log(CE_NOTE, pptr->port_dip,
6463 6471 "!ELS 0x%x failed to d_id=0x%x;"
6464 6472 " %s", icmd->ipkt_opcode,
6465 6473 ptgt->tgt_d_id, msg);
6466 6474 } else {
6467 6475 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6468 6476 fcp_trace, FCP_BUF_LEVEL_2, 0,
6469 6477 "fcp_icmd_callback,3: state change "
6470 6478 				    "occurred for D_ID=0x%x", ptgt->tgt_d_id);
6471 6479 mutex_exit(&pptr->port_mutex);
6472 6480 }
6473 6481 }
6474 6482 } else {
6475 6483 if (fcp_is_retryable(icmd) &&
6476 6484 icmd->ipkt_retries++ < FCP_MAX_RETRIES) {
6477 6485 if (FCP_MUST_RETRY(fpkt)) {
6478 6486 fcp_queue_ipkt(pptr, fpkt);
6479 6487 return;
6480 6488 }
6481 6489 }
6482 6490 mutex_enter(&pptr->port_mutex);
6483 6491 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd) &&
6484 6492 fpkt->pkt_state != FC_PKT_PORT_OFFLINE) {
6485 6493 mutex_exit(&pptr->port_mutex);
6486 6494 fcp_print_error(fpkt);
6487 6495 } else {
6488 6496 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6489 6497 fcp_trace, FCP_BUF_LEVEL_2, 0,
6490 6498 			    "fcp_icmd_callback,4: state change occurred"
6491 6499 " for D_ID=0x%x", ptgt->tgt_d_id);
6492 6500 mutex_exit(&pptr->port_mutex);
6493 6501 }
6494 6502 }
6495 6503
6496 6504 fail:
6497 6505 if (free_pkt) {
6498 6506 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6499 6507 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6500 6508 fcp_icmd_free(pptr, icmd);
6501 6509 }
6502 6510 }
6503 6511
6504 6512
6505 6513 /*
6506 6514 * called internally to send an info cmd using the transport
6507 6515 *
6508 6516 * sends either an INQ or a REPORT_LUN
6509 6517 *
6510 6518 * when the packet is completed fcp_scsi_callback is called
6511 6519 */
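/*
 * Illustrative sketch (not part of this change): the REPORT LUN request
 * issued after a successful PRLI (see fcp_icmd_callback() above) goes
 * through this routine, tagged with the current link/target counters and
 * rscn count.
 *
 *	if (fcp_send_scsi(plun, SCMD_REPORT_LUN,
 *	    sizeof (struct fcp_reportlun_resp), lcount, tcount,
 *	    cause, rscn_count) != DDI_SUCCESS) {
 *		fall back to the offline/retry handling in the caller
 *	}
 */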
6512 6520 static int
6513 6521 fcp_send_scsi(struct fcp_lun *plun, uchar_t opcode, int alloc_len,
6514 6522 int lcount, int tcount, int cause, uint32_t rscn_count)
6515 6523 {
6516 6524 int nodma;
6517 6525 struct fcp_ipkt *icmd;
6518 6526 struct fcp_tgt *ptgt;
6519 6527 struct fcp_port *pptr;
6520 6528 fc_frame_hdr_t *hp;
6521 6529 fc_packet_t *fpkt;
6522 6530 struct fcp_cmd fcp_cmd;
6523 6531 struct fcp_cmd *fcmd;
6524 6532 union scsi_cdb *scsi_cdb;
6525 6533
6526 6534 ASSERT(plun != NULL);
6527 6535
6528 6536 ptgt = plun->lun_tgt;
6529 6537 ASSERT(ptgt != NULL);
6530 6538
6531 6539 pptr = ptgt->tgt_port;
6532 6540 ASSERT(pptr != NULL);
6533 6541
6534 6542 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6535 6543 fcp_trace, FCP_BUF_LEVEL_5, 0,
6536 6544 "fcp_send_scsi: d_id=0x%x opcode=0x%x", ptgt->tgt_d_id, opcode);
6537 6545
6538 6546 nodma = (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) ? 1 : 0;
6539 6547 icmd = fcp_icmd_alloc(pptr, ptgt, sizeof (struct fcp_cmd),
6540 6548 FCP_MAX_RSP_IU_SIZE, alloc_len, nodma, lcount, tcount, cause,
6541 6549 rscn_count);
6542 6550
6543 6551 if (icmd == NULL) {
6544 6552 return (DDI_FAILURE);
6545 6553 }
6546 6554
6547 6555 fpkt = icmd->ipkt_fpkt;
6548 6556 fpkt->pkt_tran_flags = FC_TRAN_CLASS3 | FC_TRAN_INTR;
6549 6557 icmd->ipkt_retries = 0;
6550 6558 icmd->ipkt_opcode = opcode;
6551 6559 icmd->ipkt_lun = plun;
6552 6560
6553 6561 if (nodma) {
6554 6562 fcmd = (struct fcp_cmd *)fpkt->pkt_cmd;
6555 6563 } else {
6556 6564 fcmd = &fcp_cmd;
6557 6565 }
6558 6566 bzero(fcmd, sizeof (struct fcp_cmd));
6559 6567
6560 6568 fpkt->pkt_timeout = FCP_SCSI_CMD_TIMEOUT;
6561 6569
6562 6570 hp = &fpkt->pkt_cmd_fhdr;
6563 6571
6564 6572 hp->s_id = pptr->port_id;
6565 6573 hp->d_id = ptgt->tgt_d_id;
6566 6574 hp->r_ctl = R_CTL_COMMAND;
6567 6575 hp->type = FC_TYPE_SCSI_FCP;
6568 6576 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
6569 6577 hp->rsvd = 0;
6570 6578 hp->seq_id = 0;
6571 6579 hp->seq_cnt = 0;
6572 6580 hp->ox_id = 0xffff;
6573 6581 hp->rx_id = 0xffff;
6574 6582 hp->ro = 0;
6575 6583
6576 6584 bcopy(&(plun->lun_addr), &(fcmd->fcp_ent_addr), FCP_LUN_SIZE);
6577 6585
6578 6586 /*
6579 6587 * Request SCSI target for expedited processing
6580 6588 */
6581 6589
6582 6590 /*
6583 6591 * Set up for untagged queuing because we do not
6584 6592 * know if the fibre device supports queuing.
6585 6593 */
6586 6594 fcmd->fcp_cntl.cntl_reserved_0 = 0;
6587 6595 fcmd->fcp_cntl.cntl_reserved_1 = 0;
6588 6596 fcmd->fcp_cntl.cntl_reserved_2 = 0;
6589 6597 fcmd->fcp_cntl.cntl_reserved_3 = 0;
6590 6598 fcmd->fcp_cntl.cntl_reserved_4 = 0;
6591 6599 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
6592 6600 scsi_cdb = (union scsi_cdb *)fcmd->fcp_cdb;
6593 6601
6594 6602 switch (opcode) {
6595 6603 case SCMD_INQUIRY_PAGE83:
6596 6604 /*
6597 6605 * Prepare to get the Inquiry VPD page 83 information
6598 6606 */
6599 6607 fcmd->fcp_cntl.cntl_read_data = 1;
6600 6608 fcmd->fcp_cntl.cntl_write_data = 0;
6601 6609 fcmd->fcp_data_len = alloc_len;
6602 6610
6603 6611 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6604 6612 fpkt->pkt_comp = fcp_scsi_callback;
6605 6613
6606 6614 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6607 6615 scsi_cdb->g0_addr2 = 0x01;
6608 6616 scsi_cdb->g0_addr1 = 0x83;
6609 6617 scsi_cdb->g0_count0 = (uchar_t)alloc_len;
6610 6618 break;
6611 6619
6612 6620 case SCMD_INQUIRY:
6613 6621 fcmd->fcp_cntl.cntl_read_data = 1;
6614 6622 fcmd->fcp_cntl.cntl_write_data = 0;
6615 6623 fcmd->fcp_data_len = alloc_len;
6616 6624
6617 6625 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6618 6626 fpkt->pkt_comp = fcp_scsi_callback;
6619 6627
6620 6628 scsi_cdb->scc_cmd = SCMD_INQUIRY;
6621 6629 scsi_cdb->g0_count0 = SUN_INQSIZE;
6622 6630 break;
6623 6631
6624 6632 case SCMD_REPORT_LUN: {
6625 6633 fc_portid_t d_id;
6626 6634 opaque_t fca_dev;
6627 6635
6628 6636 ASSERT(alloc_len >= 16);
6629 6637
6630 6638 d_id.priv_lilp_posit = 0;
6631 6639 d_id.port_id = ptgt->tgt_d_id;
6632 6640
6633 6641 fca_dev = fc_ulp_get_fca_device(pptr->port_fp_handle, d_id);
6634 6642
6635 6643 mutex_enter(&ptgt->tgt_mutex);
6636 6644 ptgt->tgt_fca_dev = fca_dev;
6637 6645 mutex_exit(&ptgt->tgt_mutex);
6638 6646
6639 6647 fcmd->fcp_cntl.cntl_read_data = 1;
6640 6648 fcmd->fcp_cntl.cntl_write_data = 0;
6641 6649 fcmd->fcp_data_len = alloc_len;
6642 6650
6643 6651 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
6644 6652 fpkt->pkt_comp = fcp_scsi_callback;
6645 6653
6646 6654 scsi_cdb->scc_cmd = SCMD_REPORT_LUN;
6647 6655 scsi_cdb->scc5_count0 = alloc_len & 0xff;
6648 6656 scsi_cdb->scc5_count1 = (alloc_len >> 8) & 0xff;
6649 6657 scsi_cdb->scc5_count2 = (alloc_len >> 16) & 0xff;
6650 6658 scsi_cdb->scc5_count3 = (alloc_len >> 24) & 0xff;
6651 6659 break;
6652 6660 }
6653 6661
6654 6662 default:
6655 6663 fcp_log(CE_WARN, pptr->port_dip,
6656 6664 "!fcp_send_scsi Invalid opcode");
6657 6665 break;
6658 6666 }
6659 6667
6660 6668 if (!nodma) {
6661 6669 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
6662 6670 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
6663 6671 }
6664 6672
6665 6673 mutex_enter(&pptr->port_mutex);
6666 6674 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
6667 6675
6668 6676 mutex_exit(&pptr->port_mutex);
6669 6677 if (fcp_transport(pptr->port_fp_handle, fpkt, 1) !=
6670 6678 FC_SUCCESS) {
6671 6679 fcp_icmd_free(pptr, icmd);
6672 6680 return (DDI_FAILURE);
6673 6681 }
6674 6682 return (DDI_SUCCESS);
6675 6683 } else {
6676 6684 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6677 6685 fcp_trace, FCP_BUF_LEVEL_2, 0,
6678 6686 		    "fcp_send_scsi,1: state change occurred"
6679 6687 " for D_ID=0x%x", ptgt->tgt_d_id);
6680 6688 mutex_exit(&pptr->port_mutex);
6681 6689 fcp_icmd_free(pptr, icmd);
6682 6690 return (DDI_FAILURE);
6683 6691 }
6684 6692 }
6685 6693
6686 6694
6687 6695 /*
6688 6696  * called by fcp_scsi_callback to handle the case where
6689 6697 * REPORT_LUN returns ILLEGAL REQUEST or a UNIT ATTENTION
6690 6698 */
6691 6699 static int
6692 6700 fcp_check_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt)
6693 6701 {
6694 6702 uchar_t rqlen;
6695 6703 int rval = DDI_FAILURE;
6696 6704 struct scsi_extended_sense sense_info, *sense;
6697 6705 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6698 6706 fpkt->pkt_ulp_private;
6699 6707 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
6700 6708 struct fcp_port *pptr = ptgt->tgt_port;
6701 6709
6702 6710 ASSERT(icmd->ipkt_opcode == SCMD_REPORT_LUN);
6703 6711
6704 6712 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_RESERVATION_CONFLICT) {
6705 6713 /*
6706 6714 * SCSI-II Reserve Release support. Some older FC drives return
6707 6715 		 * Reservation conflict for the Report Luns command.
6708 6716 */
6709 6717 if (icmd->ipkt_nodma) {
6710 6718 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6711 6719 rsp->fcp_u.fcp_status.sense_len_set = 0;
6712 6720 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6713 6721 } else {
6714 6722 fcp_rsp_t new_resp;
6715 6723
6716 6724 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6717 6725 fpkt->pkt_resp_acc, sizeof (new_resp));
6718 6726
6719 6727 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6720 6728 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6721 6729 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6722 6730
6723 6731 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6724 6732 fpkt->pkt_resp_acc, sizeof (new_resp));
6725 6733 }
6726 6734
6727 6735 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6728 6736 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6729 6737
6730 6738 return (DDI_SUCCESS);
6731 6739 }
6732 6740
6733 6741 sense = &sense_info;
6734 6742 if (!rsp->fcp_u.fcp_status.sense_len_set) {
6735 6743 /* no need to continue if sense length is not set */
6736 6744 return (rval);
6737 6745 }
6738 6746
6739 6747 /* casting 64-bit integer to 8-bit */
6740 6748 rqlen = (uchar_t)min(rsp->fcp_sense_len,
6741 6749 sizeof (struct scsi_extended_sense));
6742 6750
6743 6751 if (rqlen < 14) {
6744 6752 /* no need to continue if request length isn't long enough */
6745 6753 return (rval);
6746 6754 }
6747 6755
6748 6756 if (icmd->ipkt_nodma) {
6749 6757 /*
6750 6758 * We can safely use fcp_response_len here since the
6751 6759 * only path that calls fcp_check_reportlun,
6752 6760 * fcp_scsi_callback, has already called
6753 6761 * fcp_validate_fcp_response.
6754 6762 */
6755 6763 sense = (struct scsi_extended_sense *)(fpkt->pkt_resp +
6756 6764 sizeof (struct fcp_rsp) + rsp->fcp_response_len);
6757 6765 } else {
6758 6766 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp) +
6759 6767 rsp->fcp_response_len, sense, fpkt->pkt_resp_acc,
6760 6768 sizeof (struct scsi_extended_sense));
6761 6769 }
6762 6770
6763 6771 if (!FCP_SENSE_NO_LUN(sense)) {
6764 6772 mutex_enter(&ptgt->tgt_mutex);
6765 6773 /* clear the flag if any */
6766 6774 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6767 6775 mutex_exit(&ptgt->tgt_mutex);
6768 6776 }
6769 6777
6770 6778 if ((sense->es_key == KEY_ILLEGAL_REQUEST) &&
6771 6779 (sense->es_add_code == 0x20)) {
6772 6780 if (icmd->ipkt_nodma) {
6773 6781 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6774 6782 rsp->fcp_u.fcp_status.sense_len_set = 0;
6775 6783 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6776 6784 } else {
6777 6785 fcp_rsp_t new_resp;
6778 6786
6779 6787 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6780 6788 fpkt->pkt_resp_acc, sizeof (new_resp));
6781 6789
6782 6790 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6783 6791 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6784 6792 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6785 6793
6786 6794 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6787 6795 fpkt->pkt_resp_acc, sizeof (new_resp));
6788 6796 }
6789 6797
6790 6798 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6791 6799 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6792 6800
6793 6801 return (DDI_SUCCESS);
6794 6802 }
6795 6803
6796 6804 /*
6797 6805 	 * This is for the STK library, which returns a check condition
6798 6806 	 * to indicate the device is not ready and manual assistance is needed.
6799 6807 	 * It does so in response to a report lun command when the door is open.
6800 6808 */
6801 6809 if ((sense->es_key == KEY_NOT_READY) && (sense->es_add_code == 0x04)) {
6802 6810 if (icmd->ipkt_nodma) {
6803 6811 rsp->fcp_u.fcp_status.rsp_len_set = 0;
6804 6812 rsp->fcp_u.fcp_status.sense_len_set = 0;
6805 6813 rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6806 6814 } else {
6807 6815 fcp_rsp_t new_resp;
6808 6816
6809 6817 FCP_CP_IN(fpkt->pkt_resp, &new_resp,
6810 6818 fpkt->pkt_resp_acc, sizeof (new_resp));
6811 6819
6812 6820 new_resp.fcp_u.fcp_status.rsp_len_set = 0;
6813 6821 new_resp.fcp_u.fcp_status.sense_len_set = 0;
6814 6822 new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
6815 6823
6816 6824 FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
6817 6825 fpkt->pkt_resp_acc, sizeof (new_resp));
6818 6826 }
6819 6827
6820 6828 FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
6821 6829 fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
6822 6830
6823 6831 return (DDI_SUCCESS);
6824 6832 }
6825 6833
6826 6834 if ((FCP_SENSE_REPORTLUN_CHANGED(sense)) ||
6827 6835 (FCP_SENSE_NO_LUN(sense))) {
6828 6836 mutex_enter(&ptgt->tgt_mutex);
6829 6837 if ((FCP_SENSE_NO_LUN(sense)) &&
6830 6838 (ptgt->tgt_state & FCP_TGT_ILLREQ)) {
6831 6839 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
6832 6840 mutex_exit(&ptgt->tgt_mutex);
6833 6841 /*
6834 6842 			 * reconfig was triggered by ILLEGAL REQUEST but
6835 6843 * got ILLEGAL REQUEST again
6836 6844 */
6837 6845 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6838 6846 fcp_trace, FCP_BUF_LEVEL_3, 0,
6839 6847 "!FCP: Unable to obtain Report Lun data"
6840 6848 " target=%x", ptgt->tgt_d_id);
6841 6849 } else {
6842 6850 if (ptgt->tgt_tid == NULL) {
6843 6851 timeout_id_t tid;
6844 6852 /*
6845 6853 * REPORT LUN data has changed. Kick off
6846 6854 * rediscovery
6847 6855 */
6848 6856 tid = timeout(fcp_reconfigure_luns,
6849 6857 (caddr_t)ptgt, (clock_t)drv_usectohz(1));
6850 6858
6851 6859 ptgt->tgt_tid = tid;
6852 6860 ptgt->tgt_state |= FCP_TGT_BUSY;
6853 6861 }
6854 6862 if (FCP_SENSE_NO_LUN(sense)) {
6855 6863 ptgt->tgt_state |= FCP_TGT_ILLREQ;
6856 6864 }
6857 6865 mutex_exit(&ptgt->tgt_mutex);
6858 6866 if (FCP_SENSE_REPORTLUN_CHANGED(sense)) {
6859 6867 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6860 6868 fcp_trace, FCP_BUF_LEVEL_3, 0,
6861 6869 "!FCP:Report Lun Has Changed"
6862 6870 " target=%x", ptgt->tgt_d_id);
6863 6871 } else if (FCP_SENSE_NO_LUN(sense)) {
6864 6872 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6865 6873 fcp_trace, FCP_BUF_LEVEL_3, 0,
6866 6874 "!FCP:LU Not Supported"
6867 6875 " target=%x", ptgt->tgt_d_id);
6868 6876 }
6869 6877 }
6870 6878 rval = DDI_SUCCESS;
6871 6879 }
6872 6880
6873 6881 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6874 6882 fcp_trace, FCP_BUF_LEVEL_5, 0,
6875 6883 "D_ID=%x, sense=%x, status=%x",
6876 6884 fpkt->pkt_cmd_fhdr.d_id, sense->es_key,
6877 6885 rsp->fcp_u.fcp_status.scsi_status);
6878 6886
6879 6887 return (rval);
6880 6888 }
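
The three "fake a clean response" branches in fcp_check_reportlun() above are identical. As a reading aid, a minimal sketch of that common pattern as a standalone helper (hypothetical name fcp_fake_good_reportlun, not part of fcp.c; it uses only the fields and macros shown above):

	static void
	fcp_fake_good_reportlun(struct fcp_rsp *rsp, fc_packet_t *fpkt, int nodma)
	{
		if (nodma) {
			/* Response is directly addressable; patch it in place. */
			rsp->fcp_u.fcp_status.rsp_len_set = 0;
			rsp->fcp_u.fcp_status.sense_len_set = 0;
			rsp->fcp_u.fcp_status.scsi_status = STATUS_GOOD;
		} else {
			/* Copy in, patch, and copy back through the access handle. */
			fcp_rsp_t new_resp;

			FCP_CP_IN(fpkt->pkt_resp, &new_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));
			new_resp.fcp_u.fcp_status.rsp_len_set = 0;
			new_resp.fcp_u.fcp_status.sense_len_set = 0;
			new_resp.fcp_u.fcp_status.scsi_status = STATUS_GOOD;
			FCP_CP_OUT(&new_resp, fpkt->pkt_resp,
			    fpkt->pkt_resp_acc, sizeof (new_resp));
		}

		/* Hand back the dummy REPORT LUNS payload so discovery continues. */
		FCP_CP_OUT(fcp_dummy_lun, fpkt->pkt_data,
		    fpkt->pkt_data_acc, sizeof (fcp_dummy_lun));
	}
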
6881 6889
6882 6890 /*
6883 6891 * Function: fcp_scsi_callback
6884 6892 *
6885 6893 * Description: This is the callback routine set by fcp_send_scsi() after
6886 6894  *		it calls fcp_icmd_alloc(). The SCSI commands completed here,
6887 6895  *		all autogenerated by FCP, are: REPORT_LUN, INQUIRY and
6888 6896 * INQUIRY_PAGE83.
6889 6897 *
6890 6898 * Argument: *fpkt FC packet used to convey the command
6891 6899 *
6892 6900 * Return Value: None
6893 6901 */
6894 6902 static void
6895 6903 fcp_scsi_callback(fc_packet_t *fpkt)
6896 6904 {
6897 6905 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
6898 6906 fpkt->pkt_ulp_private;
6899 6907 struct fcp_rsp_info fcp_rsp_err, *bep;
6900 6908 struct fcp_port *pptr;
6901 6909 struct fcp_tgt *ptgt;
6902 6910 struct fcp_lun *plun;
6903 6911 struct fcp_rsp response, *rsp;
6904 6912
6905 6913 ptgt = icmd->ipkt_tgt;
6906 6914 pptr = ptgt->tgt_port;
6907 6915 plun = icmd->ipkt_lun;
6908 6916
6909 6917 if (icmd->ipkt_nodma) {
6910 6918 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
6911 6919 } else {
6912 6920 rsp = &response;
6913 6921 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
6914 6922 sizeof (struct fcp_rsp));
6915 6923 }
6916 6924
6917 6925 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6918 6926 fcp_trace, FCP_BUF_LEVEL_2, 0,
6919 6927 "SCSI callback state=0x%x for %x, op_code=0x%x, "
6920 6928 "status=%x, lun num=%x",
6921 6929 fpkt->pkt_state, ptgt->tgt_d_id, icmd->ipkt_opcode,
6922 6930 rsp->fcp_u.fcp_status.scsi_status, plun->lun_num);
6923 6931
6924 6932 /*
6925 6933 * Pre-init LUN GUID with NWWN if it is not a device that
6926 6934 * supports multiple luns and we know it's not page83
6927 6935 * compliant. Although using a NWWN is not lun unique,
6928 6936 	 * we will be fine since there is only one lun behind the target
6929 6937 * in this case.
6930 6938 */
6931 6939 if ((plun->lun_guid_size == 0) &&
6932 6940 (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
6933 6941 (fcp_symmetric_device_probe(plun) == 0)) {
6934 6942
6935 6943 char ascii_wwn[FC_WWN_SIZE*2+1];
6936 6944 fcp_wwn_to_ascii(&ptgt->tgt_node_wwn.raw_wwn[0], ascii_wwn);
6937 6945 (void) fcp_copy_guid_2_lun_block(plun, ascii_wwn);
6938 6946 }
6939 6947
6940 6948 /*
6941 6949 * Some old FC tapes and FC <-> SCSI bridge devices return overrun
6942 6950 	 * when they have more data than what is asked for in the CDB. An overrun
6943 6951 	 * really occurs when FCP_DL is smaller than the data length in the CDB.
6944 6952 	 * In this case we know that the REPORT LUN command we formed within
6945 6953 	 * this binary has a correct FCP_DL, so this OVERRUN is due to bad device
6946 6954 * behavior. In reality this is FC_SUCCESS.
6947 6955 */
6948 6956 if ((fpkt->pkt_state != FC_PKT_SUCCESS) &&
6949 6957 (fpkt->pkt_reason == FC_REASON_OVERRUN) &&
6950 6958 (icmd->ipkt_opcode == SCMD_REPORT_LUN)) {
6951 6959 fpkt->pkt_state = FC_PKT_SUCCESS;
6952 6960 }
6953 6961
6954 6962 if (fpkt->pkt_state != FC_PKT_SUCCESS) {
6955 6963 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6956 6964 fcp_trace, FCP_BUF_LEVEL_2, 0,
6957 6965 "icmd failed with state=0x%x for %x", fpkt->pkt_state,
6958 6966 ptgt->tgt_d_id);
6959 6967
6960 6968 if (fpkt->pkt_reason == FC_REASON_CRC_ERROR) {
6961 6969 /*
6962 6970 * Inquiry VPD page command on A5K SES devices would
6963 6971 * result in data CRC errors.
6964 6972 */
6965 6973 if (icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) {
6966 6974 (void) fcp_handle_page83(fpkt, icmd, 1);
6967 6975 return;
6968 6976 }
6969 6977 }
6970 6978 if (fpkt->pkt_state == FC_PKT_TIMEOUT ||
6971 6979 FCP_MUST_RETRY(fpkt)) {
6972 6980 fpkt->pkt_timeout += FCP_TIMEOUT_DELTA;
6973 6981 fcp_retry_scsi_cmd(fpkt);
6974 6982 return;
6975 6983 }
6976 6984
6977 6985 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
6978 6986 FCP_TGT_TRACE_20);
6979 6987
6980 6988 mutex_enter(&pptr->port_mutex);
6981 6989 mutex_enter(&ptgt->tgt_mutex);
6982 6990 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
6983 6991 mutex_exit(&ptgt->tgt_mutex);
6984 6992 mutex_exit(&pptr->port_mutex);
6985 6993 fcp_print_error(fpkt);
6986 6994 } else {
6987 6995 FCP_TRACE(fcp_logq, pptr->port_instbuf,
6988 6996 fcp_trace, FCP_BUF_LEVEL_2, 0,
6989 6997 			    "fcp_scsi_callback,1: state change occurred"
6990 6998 " for D_ID=0x%x", ptgt->tgt_d_id);
6991 6999 mutex_exit(&ptgt->tgt_mutex);
6992 7000 mutex_exit(&pptr->port_mutex);
6993 7001 }
6994 7002 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
6995 7003 icmd->ipkt_change_cnt, icmd->ipkt_cause);
6996 7004 fcp_icmd_free(pptr, icmd);
6997 7005 return;
6998 7006 }
6999 7007
7000 7008 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt, FCP_TGT_TRACE_21);
7001 7009
7002 7010 mutex_enter(&pptr->port_mutex);
7003 7011 mutex_enter(&ptgt->tgt_mutex);
7004 7012 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7005 7013 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7006 7014 fcp_trace, FCP_BUF_LEVEL_2, 0,
7007 7015 		    "fcp_scsi_callback,2: state change occurred"
7008 7016 " for D_ID=0x%x", ptgt->tgt_d_id);
7009 7017 mutex_exit(&ptgt->tgt_mutex);
7010 7018 mutex_exit(&pptr->port_mutex);
7011 7019 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7012 7020 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7013 7021 fcp_icmd_free(pptr, icmd);
7014 7022 return;
7015 7023 }
7016 7024 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7017 7025
7018 7026 mutex_exit(&ptgt->tgt_mutex);
7019 7027 mutex_exit(&pptr->port_mutex);
7020 7028
7021 7029 if (icmd->ipkt_nodma) {
7022 7030 bep = (struct fcp_rsp_info *)(fpkt->pkt_resp +
7023 7031 sizeof (struct fcp_rsp));
7024 7032 } else {
7025 7033 bep = &fcp_rsp_err;
7026 7034 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp), bep,
7027 7035 fpkt->pkt_resp_acc, sizeof (struct fcp_rsp_info));
7028 7036 }
7029 7037
7030 7038 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
7031 7039 fcp_retry_scsi_cmd(fpkt);
7032 7040 return;
7033 7041 }
7034 7042
7035 7043 if (rsp->fcp_u.fcp_status.rsp_len_set && bep->rsp_code !=
7036 7044 FCP_NO_FAILURE) {
7037 7045 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7038 7046 fcp_trace, FCP_BUF_LEVEL_2, 0,
7039 7047 "rsp_code=0x%x, rsp_len_set=0x%x",
7040 7048 bep->rsp_code, rsp->fcp_u.fcp_status.rsp_len_set);
7041 7049 fcp_retry_scsi_cmd(fpkt);
7042 7050 return;
7043 7051 }
7044 7052
7045 7053 if (rsp->fcp_u.fcp_status.scsi_status == STATUS_QFULL ||
7046 7054 rsp->fcp_u.fcp_status.scsi_status == STATUS_BUSY) {
7047 7055 fcp_queue_ipkt(pptr, fpkt);
7048 7056 return;
7049 7057 }
7050 7058
7051 7059 /*
7052 7060 	 * Devices that do not support INQUIRY_PAGE83 return a check condition
7053 7061 	 * with ILLEGAL REQUEST, as per the SCSI spec.
7054 7062 * Crossbridge is one such device and Daktari's SES node is another.
7055 7063 	 * Ideally we want to enumerate these devices as non-mpxio devices.
7056 7064 * SES nodes (Daktari only currently) are an exception to this.
7057 7065 */
7058 7066 if ((icmd->ipkt_opcode == SCMD_INQUIRY_PAGE83) &&
7059 7067 (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK)) {
7060 7068
7061 7069 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7062 7070 fcp_trace, FCP_BUF_LEVEL_3, 0,
7063 7071 "INQUIRY_PAGE83 for d_id %x (dtype:0x%x) failed with "
7064 7072 "check condition. May enumerate as non-mpxio device",
7065 7073 ptgt->tgt_d_id, plun->lun_type);
7066 7074
7067 7075 /*
7068 7076 * If we let Daktari's SES be enumerated as a non-mpxio
7069 7077 		 * device, there will be a discrepancy in that the other
7070 7078 * internal FC disks will get enumerated as mpxio devices.
7071 7079 * Applications like luxadm expect this to be consistent.
7072 7080 *
7073 7081 * So, we put in a hack here to check if this is an SES device
7074 7082 * and handle it here.
7075 7083 */
7076 7084 if (plun->lun_type == DTYPE_ESI) {
7077 7085 /*
7078 7086 			 * Since pkt_state is actually FC_PKT_SUCCESS
7079 7087 * at this stage, we fake a failure here so that
7080 7088 * fcp_handle_page83 will create a device path using
7081 7089 			 * the WWN instead of the GUID, which is not there anyway.
7082 7090 */
7083 7091 fpkt->pkt_state = FC_PKT_LOCAL_RJT;
7084 7092 (void) fcp_handle_page83(fpkt, icmd, 1);
7085 7093 return;
7086 7094 }
7087 7095
7088 7096 mutex_enter(&ptgt->tgt_mutex);
7089 7097 plun->lun_state &= ~(FCP_LUN_OFFLINE |
7090 7098 FCP_LUN_MARK | FCP_LUN_BUSY);
7091 7099 mutex_exit(&ptgt->tgt_mutex);
7092 7100
7093 7101 (void) fcp_call_finish_init(pptr, ptgt,
7094 7102 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7095 7103 icmd->ipkt_cause);
7096 7104 fcp_icmd_free(pptr, icmd);
7097 7105 return;
7098 7106 }
7099 7107
7100 7108 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7101 7109 int rval = DDI_FAILURE;
7102 7110
7103 7111 /*
7104 7112 * handle cases where report lun isn't supported
7105 7113 * by faking up our own REPORT_LUN response or
7106 7114 * UNIT ATTENTION
7107 7115 */
7108 7116 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7109 7117 rval = fcp_check_reportlun(rsp, fpkt);
7110 7118
7111 7119 /*
7112 7120 * fcp_check_reportlun might have modified the
7113 7121 * FCP response. Copy it in again to get an updated
7114 7122 * FCP response
7115 7123 */
7116 7124 if (rval == DDI_SUCCESS && icmd->ipkt_nodma == 0) {
7117 7125 rsp = &response;
7118 7126
7119 7127 FCP_CP_IN(fpkt->pkt_resp, rsp,
7120 7128 fpkt->pkt_resp_acc,
7121 7129 sizeof (struct fcp_rsp));
7122 7130 }
7123 7131 }
7124 7132
7125 7133 if (rsp->fcp_u.fcp_status.scsi_status != STATUS_GOOD) {
7126 7134 if (rval == DDI_SUCCESS) {
7127 7135 (void) fcp_call_finish_init(pptr, ptgt,
7128 7136 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7129 7137 icmd->ipkt_cause);
7130 7138 fcp_icmd_free(pptr, icmd);
7131 7139 } else {
7132 7140 fcp_retry_scsi_cmd(fpkt);
7133 7141 }
7134 7142
7135 7143 return;
7136 7144 }
7137 7145 } else {
7138 7146 if (icmd->ipkt_opcode == SCMD_REPORT_LUN) {
7139 7147 mutex_enter(&ptgt->tgt_mutex);
7140 7148 ptgt->tgt_state &= ~FCP_TGT_ILLREQ;
7141 7149 mutex_exit(&ptgt->tgt_mutex);
7142 7150 }
7143 7151 }
7144 7152
7145 7153 ASSERT(rsp->fcp_u.fcp_status.scsi_status == STATUS_GOOD);
7146 7154 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
7147 7155 (void) ddi_dma_sync(fpkt->pkt_data_dma, 0, 0,
7148 7156 DDI_DMA_SYNC_FORCPU);
7149 7157 }
7150 7158
7151 7159 switch (icmd->ipkt_opcode) {
7152 7160 case SCMD_INQUIRY:
7153 7161 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_1);
7154 7162 fcp_handle_inquiry(fpkt, icmd);
7155 7163 break;
7156 7164
7157 7165 case SCMD_REPORT_LUN:
7158 7166 FCP_TGT_TRACE(ptgt, icmd->ipkt_change_cnt,
7159 7167 FCP_TGT_TRACE_22);
7160 7168 fcp_handle_reportlun(fpkt, icmd);
7161 7169 break;
7162 7170
7163 7171 case SCMD_INQUIRY_PAGE83:
7164 7172 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_2);
7165 7173 (void) fcp_handle_page83(fpkt, icmd, 0);
7166 7174 break;
7167 7175
7168 7176 default:
7169 7177 fcp_log(CE_WARN, NULL, "!Invalid SCSI opcode");
7170 7178 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7171 7179 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7172 7180 fcp_icmd_free(pptr, icmd);
7173 7181 break;
7174 7182 }
7175 7183 }
7176 7184
7177 7185
7178 7186 static void
7179 7187 fcp_retry_scsi_cmd(fc_packet_t *fpkt)
7180 7188 {
7181 7189 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
7182 7190 fpkt->pkt_ulp_private;
7183 7191 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
7184 7192 struct fcp_port *pptr = ptgt->tgt_port;
7185 7193
7186 7194 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
7187 7195 fcp_is_retryable(icmd)) {
7188 7196 mutex_enter(&pptr->port_mutex);
7189 7197 if (!FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7190 7198 mutex_exit(&pptr->port_mutex);
7191 7199 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7192 7200 fcp_trace, FCP_BUF_LEVEL_3, 0,
7193 7201 "Retrying %s to %x; state=%x, reason=%x",
7194 7202 (icmd->ipkt_opcode == SCMD_REPORT_LUN) ?
7195 7203 "Report LUN" : "INQUIRY", ptgt->tgt_d_id,
7196 7204 fpkt->pkt_state, fpkt->pkt_reason);
7197 7205
7198 7206 fcp_queue_ipkt(pptr, fpkt);
7199 7207 } else {
7200 7208 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7201 7209 fcp_trace, FCP_BUF_LEVEL_3, 0,
7202 7210 			    "fcp_retry_scsi_cmd,1: state change occurred"
7203 7211 " for D_ID=0x%x", ptgt->tgt_d_id);
7204 7212 mutex_exit(&pptr->port_mutex);
7205 7213 (void) fcp_call_finish_init(pptr, ptgt,
7206 7214 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7207 7215 icmd->ipkt_cause);
7208 7216 fcp_icmd_free(pptr, icmd);
7209 7217 }
7210 7218 } else {
7211 7219 fcp_print_error(fpkt);
7212 7220 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7213 7221 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7214 7222 fcp_icmd_free(pptr, icmd);
7215 7223 }
7216 7224 }
7217 7225
7218 7226 /*
7219 7227 * Function: fcp_handle_page83
7220 7228 *
7221 7229 * Description: Treats the response to INQUIRY_PAGE83.
7222 7230 *
7223 7231 * Argument: *fpkt FC packet used to convey the command.
7224 7232 * *icmd Original fcp_ipkt structure.
7225 7233 * ignore_page83_data
7226 7234  *		if it's 1, this is a special device's page83
7227 7235  *		response and the LUN should be enumerated under mpxio
7228 7236 *
7229 7237 * Return Value: None
7230 7238 */
7231 7239 static void
7232 7240 fcp_handle_page83(fc_packet_t *fpkt, struct fcp_ipkt *icmd,
7233 7241 int ignore_page83_data)
7234 7242 {
7235 7243 struct fcp_port *pptr;
7236 7244 struct fcp_lun *plun;
7237 7245 struct fcp_tgt *ptgt;
7238 7246 uchar_t dev_id_page[SCMD_MAX_INQUIRY_PAGE83_SIZE];
7239 7247 int fail = 0;
7240 7248 ddi_devid_t devid;
7241 7249 char *guid = NULL;
7242 7250 int ret;
7243 7251
7244 7252 ASSERT(icmd != NULL && fpkt != NULL);
7245 7253
7246 7254 pptr = icmd->ipkt_port;
7247 7255 ptgt = icmd->ipkt_tgt;
7248 7256 plun = icmd->ipkt_lun;
7249 7257
7250 7258 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
7251 7259 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_7);
7252 7260
7253 7261 FCP_CP_IN(fpkt->pkt_data, dev_id_page, fpkt->pkt_data_acc,
7254 7262 SCMD_MAX_INQUIRY_PAGE83_SIZE);
7255 7263
7256 7264 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7257 7265 fcp_trace, FCP_BUF_LEVEL_5, 0,
7258 7266 "fcp_handle_page83: port=%d, tgt D_ID=0x%x, "
7259 7267 "dtype=0x%x, lun num=%x",
7260 7268 pptr->port_instance, ptgt->tgt_d_id,
7261 7269 dev_id_page[0], plun->lun_num);
7262 7270
7263 7271 ret = ddi_devid_scsi_encode(
7264 7272 DEVID_SCSI_ENCODE_VERSION_LATEST,
7265 7273 NULL, /* driver name */
7266 7274 (unsigned char *) &plun->lun_inq, /* standard inquiry */
7267 7275 sizeof (plun->lun_inq), /* size of standard inquiry */
7268 7276 NULL, /* page 80 data */
7269 7277 0, /* page 80 len */
7270 7278 dev_id_page, /* page 83 data */
7271 7279 SCMD_MAX_INQUIRY_PAGE83_SIZE, /* page 83 data len */
7272 7280 &devid);
7273 7281
7274 7282 if (ret == DDI_SUCCESS) {
7275 7283
7276 7284 guid = ddi_devid_to_guid(devid);
7277 7285
7278 7286 if (guid) {
7279 7287 /*
7280 7288 * Check our current guid. If it's non null
7281 7289 * and it has changed, we need to copy it into
7282 7290 * lun_old_guid since we might still need it.
7283 7291 */
7284 7292 if (plun->lun_guid &&
7285 7293 strcmp(guid, plun->lun_guid)) {
7286 7294 unsigned int len;
7287 7295
7288 7296 /*
7289 7297 * If the guid of the LUN changes,
7290 7298 * reconfiguration should be triggered
7291 7299 * to reflect the changes.
7292 7300 * i.e. we should offline the LUN with
7293 7301 * the old guid, and online the LUN with
7294 7302 * the new guid.
7295 7303 */
7296 7304 plun->lun_state |= FCP_LUN_CHANGED;
7297 7305
7298 7306 if (plun->lun_old_guid) {
7299 7307 kmem_free(plun->lun_old_guid,
7300 7308 plun->lun_old_guid_size);
7301 7309 }
7302 7310
7303 7311 len = plun->lun_guid_size;
7304 7312 plun->lun_old_guid_size = len;
7305 7313
7306 7314 plun->lun_old_guid = kmem_zalloc(len,
7307 7315 KM_NOSLEEP);
7308 7316
7309 7317 if (plun->lun_old_guid) {
7310 7318 /*
7311 7319 * The alloc was successful then
7312 7320 * let's do the copy.
7313 7321 */
7314 7322 bcopy(plun->lun_guid,
7315 7323 plun->lun_old_guid, len);
7316 7324 } else {
7317 7325 fail = 1;
7318 7326 plun->lun_old_guid_size = 0;
7319 7327 }
7320 7328 }
7321 7329 if (!fail) {
7322 7330 if (fcp_copy_guid_2_lun_block(
7323 7331 plun, guid)) {
7324 7332 fail = 1;
7325 7333 }
7326 7334 }
7327 7335 ddi_devid_free_guid(guid);
7328 7336
7329 7337 } else {
7330 7338 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7331 7339 fcp_trace, FCP_BUF_LEVEL_2, 0,
7332 7340 "fcp_handle_page83: unable to create "
7333 7341 "GUID");
7334 7342
7335 7343 /* couldn't create good guid from devid */
7336 7344 fail = 1;
7337 7345 }
7338 7346 ddi_devid_free(devid);
7339 7347
7340 7348 } else if (ret == DDI_NOT_WELL_FORMED) {
7341 7349 /* NULL filled data for page 83 */
7342 7350 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7343 7351 fcp_trace, FCP_BUF_LEVEL_2, 0,
7344 7352 "fcp_handle_page83: retry GUID");
7345 7353
7346 7354 icmd->ipkt_retries = 0;
7347 7355 fcp_retry_scsi_cmd(fpkt);
7348 7356 return;
7349 7357 } else {
7350 7358 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7351 7359 fcp_trace, FCP_BUF_LEVEL_2, 0,
7352 7360 "fcp_handle_page83: bad ddi_devid_scsi_encode %x",
7353 7361 ret);
7354 7362 /*
7355 7363 			 * Since the page83 validation was
7356 7364 			 * introduced late, we are tolerant of
7357 7365 			 * the existing devices that were already
7358 7366 			 * found to be working under mpxio, like
7359 7367 			 * the A5200's SES device. Its page83
7360 7368 			 * response is not standard-compliant, but
7361 7369 			 * we still want it to be enumerated under mpxio.
7362 7370 */
7363 7371 if (fcp_symmetric_device_probe(plun) != 0) {
7364 7372 fail = 1;
7365 7373 }
7366 7374 }
7367 7375
7368 7376 } else {
7369 7377 /* bad packet state */
7370 7378 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_8);
7371 7379
7372 7380 /*
7373 7381 * For some special devices (A5K SES and Daktari's SES devices),
7374 7382 * they should be enumerated under mpxio
7375 7383 * or "luxadm dis" will fail
7376 7384 */
7377 7385 if (ignore_page83_data) {
7378 7386 fail = 0;
7379 7387 } else {
7380 7388 fail = 1;
7381 7389 }
7382 7390 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7383 7391 fcp_trace, FCP_BUF_LEVEL_2, 0,
7384 7392 "!Devid page cmd failed. "
7385 7393 "fpkt_state: %x fpkt_reason: %x",
7386 7394 "ignore_page83: %d",
7387 7395 fpkt->pkt_state, fpkt->pkt_reason,
7388 7396 ignore_page83_data);
7389 7397 }
7390 7398
7391 7399 mutex_enter(&pptr->port_mutex);
7392 7400 mutex_enter(&plun->lun_mutex);
7393 7401 /*
7394 7402 	 * If lun_cip is not NULL, we skip updating lun_mpxio to avoid a
7395 7403 	 * mismatch between lun_cip and lun_mpxio.
7396 7404 */
7397 7405 if (plun->lun_cip == NULL) {
7398 7406 /*
7399 7407 * If we don't have a guid for this lun it's because we were
7400 7408 * unable to glean one from the page 83 response. Set the
7401 7409 * control flag to 0 here to make sure that we don't attempt to
7402 7410 * enumerate it under mpxio.
7403 7411 */
7404 7412 if (fail || pptr->port_mpxio == 0) {
7405 7413 plun->lun_mpxio = 0;
7406 7414 } else {
7407 7415 plun->lun_mpxio = 1;
7408 7416 }
7409 7417 }
7410 7418 mutex_exit(&plun->lun_mutex);
7411 7419 mutex_exit(&pptr->port_mutex);
7412 7420
7413 7421 mutex_enter(&ptgt->tgt_mutex);
7414 7422 plun->lun_state &=
7415 7423 ~(FCP_LUN_OFFLINE | FCP_LUN_MARK | FCP_LUN_BUSY);
7416 7424 mutex_exit(&ptgt->tgt_mutex);
7417 7425
7418 7426 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7419 7427 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7420 7428
7421 7429 fcp_icmd_free(pptr, icmd);
7422 7430 }
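
The core of the successful path above is the devid-to-GUID conversion; stripped of the guid-change bookkeeping, retry handling, and trace logging, it reduces to the following sketch (error handling elided; the calls and arguments are the same ones used in fcp_handle_page83() above):

	ddi_devid_t	devid;
	char		*guid;

	if (ddi_devid_scsi_encode(DEVID_SCSI_ENCODE_VERSION_LATEST,
	    NULL, (unsigned char *)&plun->lun_inq, sizeof (plun->lun_inq),
	    NULL, 0, dev_id_page, SCMD_MAX_INQUIRY_PAGE83_SIZE,
	    &devid) == DDI_SUCCESS) {
		if ((guid = ddi_devid_to_guid(devid)) != NULL) {
			/* The GUID string is valid until ddi_devid_free_guid(). */
			(void) fcp_copy_guid_2_lun_block(plun, guid);
			ddi_devid_free_guid(guid);
		}
		ddi_devid_free(devid);
	}
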
7423 7431
7424 7432 /*
7425 7433 * Function: fcp_handle_inquiry
7426 7434 *
7427 7435 * Description: Called by fcp_scsi_callback to handle the response to an
7428 7436 * INQUIRY request.
7429 7437 *
7430 7438 * Argument: *fpkt FC packet used to convey the command.
7431 7439 * *icmd Original fcp_ipkt structure.
7432 7440 *
7433 7441 * Return Value: None
7434 7442 */
7435 7443 static void
7436 7444 fcp_handle_inquiry(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7437 7445 {
7438 7446 struct fcp_port *pptr;
7439 7447 struct fcp_lun *plun;
7440 7448 struct fcp_tgt *ptgt;
7441 7449 uchar_t dtype;
7442 7450 uchar_t pqual;
7443 7451 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7444 7452
7445 7453 ASSERT(icmd != NULL && fpkt != NULL);
7446 7454
7447 7455 pptr = icmd->ipkt_port;
7448 7456 ptgt = icmd->ipkt_tgt;
7449 7457 plun = icmd->ipkt_lun;
7450 7458
7451 7459 FCP_CP_IN(fpkt->pkt_data, &plun->lun_inq, fpkt->pkt_data_acc,
7452 7460 sizeof (struct scsi_inquiry));
7453 7461
7454 7462 dtype = plun->lun_inq.inq_dtype & DTYPE_MASK;
7455 7463 pqual = plun->lun_inq.inq_dtype >> 5;
7456 7464
7457 7465 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7458 7466 fcp_trace, FCP_BUF_LEVEL_5, 0,
7459 7467 "fcp_handle_inquiry: port=%d, tgt D_ID=0x%x, lun=0x%x, "
7460 7468 "dtype=0x%x pqual: 0x%x", pptr->port_instance, ptgt->tgt_d_id,
7461 7469 plun->lun_num, dtype, pqual);
7462 7470
7463 7471 if (pqual != 0) {
7464 7472 /*
7465 7473 * Non-zero peripheral qualifier
7466 7474 */
7467 7475 fcp_log(CE_CONT, pptr->port_dip,
7468 7476 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7469 7477 "Device type=0x%x Peripheral qual=0x%x\n",
7470 7478 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7471 7479
7472 7480 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7473 7481 fcp_trace, FCP_BUF_LEVEL_5, 0,
7474 7482 "!Target 0x%x lun 0x%x: Nonzero peripheral qualifier: "
7475 7483 "Device type=0x%x Peripheral qual=0x%x\n",
7476 7484 ptgt->tgt_d_id, plun->lun_num, dtype, pqual);
7477 7485
7478 7486 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_3);
7479 7487
7480 7488 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7481 7489 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7482 7490 fcp_icmd_free(pptr, icmd);
7483 7491 return;
7484 7492 }
7485 7493
7486 7494 /*
7487 7495 * If the device is already initialized, check the dtype
7488 7496 * for a change. If it has changed then update the flags
7489 7497 * so the create_luns will offline the old device and
7490 7498 * create the new device. Refer to bug: 4764752
7491 7499 */
7492 7500 if ((plun->lun_state & FCP_LUN_INIT) && dtype != plun->lun_type) {
7493 7501 plun->lun_state |= FCP_LUN_CHANGED;
7494 7502 }
7495 7503 plun->lun_type = plun->lun_inq.inq_dtype;
7496 7504
7497 7505 /*
7498 7506 * This code is setting/initializing the throttling in the FCA
7499 7507 * driver.
7500 7508 */
7501 7509 mutex_enter(&pptr->port_mutex);
7502 7510 if (!pptr->port_notify) {
7503 7511 if (bcmp(plun->lun_inq.inq_pid, pid, strlen(pid)) == 0) {
7504 7512 uint32_t cmd = 0;
7505 7513 cmd = ((cmd & 0xFF | FC_NOTIFY_THROTTLE) |
7506 7514 ((cmd & 0xFFFFFF00 >> 8) |
7507 7515 FCP_SVE_THROTTLE << 8));
7508 7516 pptr->port_notify = 1;
7509 7517 mutex_exit(&pptr->port_mutex);
7510 7518 (void) fc_ulp_port_notify(pptr->port_fp_handle, cmd);
7511 7519 mutex_enter(&pptr->port_mutex);
7512 7520 }
7513 7521 }
7514 7522
7515 7523 if (FCP_TGT_STATE_CHANGED(ptgt, icmd)) {
7516 7524 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7517 7525 fcp_trace, FCP_BUF_LEVEL_2, 0,
7518 7526 		    "fcp_handle_inquiry,1: state change occurred"
7519 7527 " for D_ID=0x%x", ptgt->tgt_d_id);
7520 7528 mutex_exit(&pptr->port_mutex);
7521 7529
7522 7530 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_5);
7523 7531 (void) fcp_call_finish_init(pptr, ptgt,
7524 7532 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7525 7533 icmd->ipkt_cause);
7526 7534 fcp_icmd_free(pptr, icmd);
7527 7535 return;
7528 7536 }
7529 7537 ASSERT((ptgt->tgt_state & FCP_TGT_MARK) == 0);
7530 7538 mutex_exit(&pptr->port_mutex);
7531 7539
7532 7540 /* Retrieve the rscn count (if a valid one exists) */
7533 7541 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7534 7542 rscn_count = ((fc_ulp_rscn_info_t *)
7535 7543 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->ulp_rscn_count;
7536 7544 } else {
7537 7545 rscn_count = FC_INVALID_RSCN_COUNT;
7538 7546 }
7539 7547
7540 7548 if (fcp_send_scsi(plun, SCMD_INQUIRY_PAGE83,
7541 7549 SCMD_MAX_INQUIRY_PAGE83_SIZE,
7542 7550 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7543 7551 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7544 7552 fcp_log(CE_WARN, NULL, "!failed to send page 83");
7545 7553 FCP_LUN_TRACE(plun, FCP_LUN_TRACE_6);
7546 7554 (void) fcp_call_finish_init(pptr, ptgt,
7547 7555 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7548 7556 icmd->ipkt_cause);
7549 7557 }
7550 7558
7551 7559 /*
7552 7560 * Read Inquiry VPD Page 0x83 to uniquely
7553 7561 * identify this logical unit.
7554 7562 */
7555 7563 fcp_icmd_free(pptr, icmd);
7556 7564 }
7557 7565
7558 7566 /*
7559 7567 * Function: fcp_handle_reportlun
7560 7568 *
7561 7569 * Description: Called by fcp_scsi_callback to handle the response to a
7562 7570 * REPORT_LUN request.
7563 7571 *
7564 7572 * Argument: *fpkt FC packet used to convey the command.
7565 7573 * *icmd Original fcp_ipkt structure.
7566 7574 *
7567 7575 * Return Value: None
7568 7576 */
7569 7577 static void
7570 7578 fcp_handle_reportlun(fc_packet_t *fpkt, struct fcp_ipkt *icmd)
7571 7579 {
7572 7580 int i;
7573 7581 int nluns_claimed;
7574 7582 int nluns_bufmax;
7575 7583 int len;
7576 7584 uint16_t lun_num;
7577 7585 uint32_t rscn_count = FC_INVALID_RSCN_COUNT;
7578 7586 struct fcp_port *pptr;
7579 7587 struct fcp_tgt *ptgt;
7580 7588 struct fcp_lun *plun;
7581 7589 struct fcp_reportlun_resp *report_lun;
7582 7590
7583 7591 pptr = icmd->ipkt_port;
7584 7592 ptgt = icmd->ipkt_tgt;
7585 7593 len = fpkt->pkt_datalen;
7586 7594
7587 7595 if ((len < FCP_LUN_HEADER) ||
7588 7596 ((report_lun = kmem_zalloc(len, KM_NOSLEEP)) == NULL)) {
7589 7597 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7590 7598 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7591 7599 fcp_icmd_free(pptr, icmd);
7592 7600 return;
7593 7601 }
7594 7602
7595 7603 FCP_CP_IN(fpkt->pkt_data, report_lun, fpkt->pkt_data_acc,
7596 7604 fpkt->pkt_datalen);
7597 7605
7598 7606 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7599 7607 fcp_trace, FCP_BUF_LEVEL_5, 0,
7600 7608 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x",
7601 7609 pptr->port_instance, ptgt->tgt_d_id);
7602 7610
7603 7611 /*
7604 7612 * Get the number of luns (which is supplied as LUNS * 8) the
7605 7613 * device claims it has.
7606 7614 */
7607 7615 nluns_claimed = BE_32(report_lun->num_lun) >> 3;
7608 7616
7609 7617 /*
7610 7618 * Get the maximum number of luns the buffer submitted can hold.
7611 7619 */
7612 7620 nluns_bufmax = (fpkt->pkt_datalen - FCP_LUN_HEADER) / FCP_LUN_SIZE;
7613 7621
7614 7622 /*
7615 7623 * Due to limitations of certain hardware, we support only 16 bit LUNs
7616 7624 */
7617 7625 if (nluns_claimed > FCP_MAX_LUNS_SUPPORTED) {
7618 7626 kmem_free(report_lun, len);
7619 7627
7620 7628 fcp_log(CE_NOTE, pptr->port_dip, "!Can not support"
7621 7629 " 0x%x number of LUNs for target=%x", nluns_claimed,
7622 7630 ptgt->tgt_d_id);
7623 7631
7624 7632 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7625 7633 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7626 7634 fcp_icmd_free(pptr, icmd);
7627 7635 return;
7628 7636 }
7629 7637
7630 7638 /*
7631 7639 * If there are more LUNs than we have allocated memory for,
7632 7640 * allocate more space and send down yet another report lun if
7633 7641 * the maximum number of attempts hasn't been reached.
7634 7642 */
7635 7643 mutex_enter(&ptgt->tgt_mutex);
7636 7644
7637 7645 if ((nluns_claimed > nluns_bufmax) &&
7638 7646 (ptgt->tgt_report_lun_cnt < FCP_MAX_REPORTLUNS_ATTEMPTS)) {
7639 7647
7640 7648 struct fcp_lun *plun;
7641 7649
7642 7650 ptgt->tgt_report_lun_cnt++;
7643 7651 plun = ptgt->tgt_lun;
7644 7652 ASSERT(plun != NULL);
7645 7653 mutex_exit(&ptgt->tgt_mutex);
7646 7654
7647 7655 kmem_free(report_lun, len);
7648 7656
7649 7657 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7650 7658 fcp_trace, FCP_BUF_LEVEL_5, 0,
7651 7659 "!Dynamically discovered %d LUNs for D_ID=%x",
7652 7660 nluns_claimed, ptgt->tgt_d_id);
7653 7661
7654 7662 /* Retrieve the rscn count (if a valid one exists) */
7655 7663 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7656 7664 rscn_count = ((fc_ulp_rscn_info_t *)
7657 7665 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7658 7666 ulp_rscn_count;
7659 7667 } else {
7660 7668 rscn_count = FC_INVALID_RSCN_COUNT;
7661 7669 }
7662 7670
7663 7671 if (fcp_send_scsi(icmd->ipkt_lun, SCMD_REPORT_LUN,
7664 7672 FCP_LUN_HEADER + (nluns_claimed * FCP_LUN_SIZE),
7665 7673 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7666 7674 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7667 7675 (void) fcp_call_finish_init(pptr, ptgt,
7668 7676 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7669 7677 icmd->ipkt_cause);
7670 7678 }
7671 7679
7672 7680 fcp_icmd_free(pptr, icmd);
7673 7681 return;
7674 7682 }
7675 7683
7676 7684 if (nluns_claimed > nluns_bufmax) {
7677 7685 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7678 7686 fcp_trace, FCP_BUF_LEVEL_5, 0,
7679 7687 "Target=%x:%x:%x:%x:%x:%x:%x:%x"
7680 7688 " Number of LUNs lost=%x",
7681 7689 ptgt->tgt_port_wwn.raw_wwn[0],
7682 7690 ptgt->tgt_port_wwn.raw_wwn[1],
7683 7691 ptgt->tgt_port_wwn.raw_wwn[2],
7684 7692 ptgt->tgt_port_wwn.raw_wwn[3],
7685 7693 ptgt->tgt_port_wwn.raw_wwn[4],
7686 7694 ptgt->tgt_port_wwn.raw_wwn[5],
7687 7695 ptgt->tgt_port_wwn.raw_wwn[6],
7688 7696 ptgt->tgt_port_wwn.raw_wwn[7],
7689 7697 nluns_claimed - nluns_bufmax);
7690 7698
7691 7699 nluns_claimed = nluns_bufmax;
7692 7700 }
7693 7701 ptgt->tgt_lun_cnt = nluns_claimed;
7694 7702
7695 7703 /*
7696 7704 * Identify missing LUNs and print warning messages
7697 7705 */
7698 7706 for (plun = ptgt->tgt_lun; plun; plun = plun->lun_next) {
7699 7707 int offline;
7700 7708 int exists = 0;
7701 7709
7702 7710 offline = (plun->lun_state & FCP_LUN_OFFLINE) ? 1 : 0;
7703 7711
7704 7712 for (i = 0; i < nluns_claimed && exists == 0; i++) {
7705 7713 uchar_t *lun_string;
7706 7714
7707 7715 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7708 7716
7709 7717 switch (lun_string[0] & 0xC0) {
7710 7718 case FCP_LUN_ADDRESSING:
7711 7719 case FCP_PD_ADDRESSING:
7712 7720 case FCP_VOLUME_ADDRESSING:
7713 7721 lun_num = ((lun_string[0] & 0x3F) << 8) |
7714 7722 lun_string[1];
7715 7723 if (plun->lun_num == lun_num) {
7716 7724 exists++;
7717 7725 break;
7718 7726 }
7719 7727 break;
7720 7728
7721 7729 default:
7722 7730 break;
7723 7731 }
7724 7732 }
7725 7733
7726 7734 if (!exists && !offline) {
7727 7735 mutex_exit(&ptgt->tgt_mutex);
7728 7736
7729 7737 mutex_enter(&pptr->port_mutex);
7730 7738 mutex_enter(&ptgt->tgt_mutex);
7731 7739 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7732 7740 /*
7733 7741 * set disappear flag when device was connected
7734 7742 */
7735 7743 if (!(plun->lun_state &
7736 7744 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7737 7745 plun->lun_state |= FCP_LUN_DISAPPEARED;
7738 7746 }
7739 7747 mutex_exit(&ptgt->tgt_mutex);
7740 7748 mutex_exit(&pptr->port_mutex);
7741 7749 if (!(plun->lun_state &
7742 7750 FCP_LUN_DEVICE_NOT_CONNECTED)) {
7743 7751 fcp_log(CE_NOTE, pptr->port_dip,
7744 7752 "!Lun=%x for target=%x disappeared",
7745 7753 plun->lun_num, ptgt->tgt_d_id);
7746 7754 }
7747 7755 mutex_enter(&ptgt->tgt_mutex);
7748 7756 } else {
7749 7757 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7750 7758 fcp_trace, FCP_BUF_LEVEL_5, 0,
7751 7759 "fcp_handle_reportlun,1: state change"
7752 7760 				    " occurred for D_ID=0x%x", ptgt->tgt_d_id);
7753 7761 mutex_exit(&ptgt->tgt_mutex);
7754 7762 mutex_exit(&pptr->port_mutex);
7755 7763 kmem_free(report_lun, len);
7756 7764 (void) fcp_call_finish_init(pptr, ptgt,
7757 7765 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7758 7766 icmd->ipkt_cause);
7759 7767 fcp_icmd_free(pptr, icmd);
7760 7768 return;
7761 7769 }
7762 7770 } else if (exists) {
7763 7771 /*
7764 7772 * clear FCP_LUN_DEVICE_NOT_CONNECTED when lun 0
7765 7773 * actually exists in REPORT_LUN response
7766 7774 */
7767 7775 if (plun->lun_state & FCP_LUN_DEVICE_NOT_CONNECTED) {
7768 7776 plun->lun_state &=
7769 7777 ~FCP_LUN_DEVICE_NOT_CONNECTED;
7770 7778 }
7771 7779 if (offline || plun->lun_num == 0) {
7772 7780 if (plun->lun_state & FCP_LUN_DISAPPEARED) {
7773 7781 plun->lun_state &= ~FCP_LUN_DISAPPEARED;
7774 7782 mutex_exit(&ptgt->tgt_mutex);
7775 7783 fcp_log(CE_NOTE, pptr->port_dip,
7776 7784 "!Lun=%x for target=%x reappeared",
7777 7785 plun->lun_num, ptgt->tgt_d_id);
7778 7786 mutex_enter(&ptgt->tgt_mutex);
7779 7787 }
7780 7788 }
7781 7789 }
7782 7790 }
7783 7791
7784 7792 ptgt->tgt_tmp_cnt = nluns_claimed ? nluns_claimed : 1;
7785 7793 mutex_exit(&ptgt->tgt_mutex);
7786 7794
7787 7795 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7788 7796 fcp_trace, FCP_BUF_LEVEL_5, 0,
7789 7797 "fcp_handle_reportlun: port=%d, tgt D_ID=0x%x, %d LUN(s)",
7790 7798 pptr->port_instance, ptgt->tgt_d_id, nluns_claimed);
7791 7799
7792 7800 /* scan each lun */
7793 7801 for (i = 0; i < nluns_claimed; i++) {
7794 7802 uchar_t *lun_string;
7795 7803
7796 7804 lun_string = (uchar_t *)&(report_lun->lun_string[i]);
7797 7805
7798 7806 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7799 7807 fcp_trace, FCP_BUF_LEVEL_5, 0,
7800 7808 "handle_reportlun: d_id=%x, LUN ind=%d, LUN=%d,"
7801 7809 " addr=0x%x", ptgt->tgt_d_id, i, lun_string[1],
7802 7810 lun_string[0]);
7803 7811
7804 7812 switch (lun_string[0] & 0xC0) {
7805 7813 case FCP_LUN_ADDRESSING:
7806 7814 case FCP_PD_ADDRESSING:
7807 7815 case FCP_VOLUME_ADDRESSING:
7808 7816 lun_num = ((lun_string[0] & 0x3F) << 8) | lun_string[1];
7809 7817
7810 7818 /* We will skip masked LUNs because of the blacklist. */
7811 7819 if (fcp_lun_blacklist != NULL) {
7812 7820 mutex_enter(&ptgt->tgt_mutex);
7813 7821 if (fcp_should_mask(&ptgt->tgt_port_wwn,
7814 7822 lun_num) == TRUE) {
7815 7823 ptgt->tgt_lun_cnt--;
7816 7824 mutex_exit(&ptgt->tgt_mutex);
7817 7825 break;
7818 7826 }
7819 7827 mutex_exit(&ptgt->tgt_mutex);
7820 7828 }
7821 7829
7822 7830 /* see if this LUN is already allocated */
7823 7831 if ((plun = fcp_get_lun(ptgt, lun_num)) == NULL) {
7824 7832 plun = fcp_alloc_lun(ptgt);
7825 7833 if (plun == NULL) {
7826 7834 fcp_log(CE_NOTE, pptr->port_dip,
7827 7835 "!Lun allocation failed"
7828 7836 " target=%x lun=%x",
7829 7837 ptgt->tgt_d_id, lun_num);
7830 7838 break;
7831 7839 }
7832 7840 }
7833 7841
7834 7842 mutex_enter(&plun->lun_tgt->tgt_mutex);
7835 7843 /* convert to LUN */
7836 7844 plun->lun_addr.ent_addr_0 =
7837 7845 BE_16(*(uint16_t *)&(lun_string[0]));
7838 7846 plun->lun_addr.ent_addr_1 =
7839 7847 BE_16(*(uint16_t *)&(lun_string[2]));
7840 7848 plun->lun_addr.ent_addr_2 =
7841 7849 BE_16(*(uint16_t *)&(lun_string[4]));
7842 7850 plun->lun_addr.ent_addr_3 =
7843 7851 BE_16(*(uint16_t *)&(lun_string[6]));
7844 7852
7845 7853 plun->lun_num = lun_num;
7846 7854 plun->lun_state |= FCP_LUN_BUSY | FCP_LUN_MARK;
7847 7855 plun->lun_state &= ~FCP_LUN_OFFLINE;
7848 7856 mutex_exit(&plun->lun_tgt->tgt_mutex);
7849 7857
7850 7858 /* Retrieve the rscn count (if a valid one exists) */
7851 7859 if (icmd->ipkt_fpkt->pkt_ulp_rscn_infop != NULL) {
7852 7860 rscn_count = ((fc_ulp_rscn_info_t *)
7853 7861 (icmd->ipkt_fpkt->pkt_ulp_rscn_infop))->
7854 7862 ulp_rscn_count;
7855 7863 } else {
7856 7864 rscn_count = FC_INVALID_RSCN_COUNT;
7857 7865 }
7858 7866
7859 7867 if (fcp_send_scsi(plun, SCMD_INQUIRY, SUN_INQSIZE,
7860 7868 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
7861 7869 icmd->ipkt_cause, rscn_count) != DDI_SUCCESS) {
7862 7870 mutex_enter(&pptr->port_mutex);
7863 7871 mutex_enter(&plun->lun_tgt->tgt_mutex);
7864 7872 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
7865 7873 fcp_log(CE_NOTE, pptr->port_dip,
7866 7874 "!failed to send INQUIRY"
7867 7875 " target=%x lun=%x",
7868 7876 ptgt->tgt_d_id, plun->lun_num);
7869 7877 } else {
7870 7878 FCP_TRACE(fcp_logq,
7871 7879 pptr->port_instbuf, fcp_trace,
7872 7880 FCP_BUF_LEVEL_5, 0,
7873 7881 "fcp_handle_reportlun,2: state"
7874 7882 					    " change occurred for D_ID=0x%x",
7875 7883 ptgt->tgt_d_id);
7876 7884 }
7877 7885 mutex_exit(&plun->lun_tgt->tgt_mutex);
7878 7886 mutex_exit(&pptr->port_mutex);
7879 7887 } else {
7880 7888 continue;
7881 7889 }
7882 7890 break;
7883 7891
7884 7892 default:
7885 7893 fcp_log(CE_WARN, NULL,
7886 7894 "!Unsupported LUN Addressing method %x "
7887 7895 "in response to REPORT_LUN", lun_string[0]);
7888 7896 break;
7889 7897 }
7890 7898
7891 7899 /*
7892 7900 * each time through this loop we should decrement
7893 7901 * the tmp_cnt by one -- since we go through this loop
7894 7902 * one time for each LUN, the tmp_cnt should never be <=0
7895 7903 */
7896 7904 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7897 7905 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7898 7906 }
7899 7907
7900 7908 if (i == 0) {
7901 7909 fcp_log(CE_WARN, pptr->port_dip,
7902 7910 "!FCP: target=%x reported NO Luns", ptgt->tgt_d_id);
7903 7911 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
7904 7912 icmd->ipkt_change_cnt, icmd->ipkt_cause);
7905 7913 }
7906 7914
7907 7915 kmem_free(report_lun, len);
7908 7916 fcp_icmd_free(pptr, icmd);
7909 7917 }
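
The REPORT_LUN header reports a byte count (LUNs * 8), which is why nluns_claimed above is computed as BE_32(num_lun) >> 3. The 8-byte LUN entries themselves are decoded twice in fcp_handle_reportlun() (once in the missing-LUN scan and once in the per-LUN scan); a standalone sketch of that decode (hypothetical function name, same masks and constants as above):

	static uint16_t
	fcp_example_decode_lun(uchar_t *lun_string)
	{
		switch (lun_string[0] & 0xC0) {
		case FCP_LUN_ADDRESSING:
		case FCP_PD_ADDRESSING:
		case FCP_VOLUME_ADDRESSING:
			/* Bits 5:0 of byte 0 hold the high 6 bits of the LUN. */
			return (((lun_string[0] & 0x3F) << 8) | lun_string[1]);
		default:
			/* The real code logs a warning and skips the entry. */
			return (0);
		}
	}
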
7910 7918
7911 7919
7912 7920 /*
7913 7921 * called internally to return a LUN given a target and a LUN number
7914 7922 */
7915 7923 static struct fcp_lun *
7916 7924 fcp_get_lun(struct fcp_tgt *ptgt, uint16_t lun_num)
7917 7925 {
7918 7926 struct fcp_lun *plun;
7919 7927
7920 7928 mutex_enter(&ptgt->tgt_mutex);
7921 7929 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
7922 7930 if (plun->lun_num == lun_num) {
7923 7931 mutex_exit(&ptgt->tgt_mutex);
7924 7932 return (plun);
7925 7933 }
7926 7934 }
7927 7935 mutex_exit(&ptgt->tgt_mutex);
7928 7936
7929 7937 return (NULL);
7930 7938 }
7931 7939
7932 7940
7933 7941 /*
7934 7942 * handle finishing one target for fcp_finish_init
7935 7943 *
7936 7944 * return true (non-zero) if we want finish_init to continue with the
7937 7945 * next target
7938 7946 *
7939 7947 * called with the port mutex held
7940 7948 */
7941 7949 /*ARGSUSED*/
7942 7950 static int
7943 7951 fcp_finish_tgt(struct fcp_port *pptr, struct fcp_tgt *ptgt,
7944 7952 int link_cnt, int tgt_cnt, int cause)
7945 7953 {
7946 7954 int rval = 1;
7947 7955 ASSERT(pptr != NULL);
7948 7956 ASSERT(ptgt != NULL);
7949 7957
7950 7958 FCP_TRACE(fcp_logq, pptr->port_instbuf,
7951 7959 fcp_trace, FCP_BUF_LEVEL_5, 0,
7952 7960 "finish_tgt: D_ID/state = 0x%x/0x%x", ptgt->tgt_d_id,
7953 7961 ptgt->tgt_state);
7954 7962
7955 7963 ASSERT(mutex_owned(&pptr->port_mutex));
7956 7964
7957 7965 if ((pptr->port_link_cnt != link_cnt) ||
7958 7966 (tgt_cnt && ptgt->tgt_change_cnt != tgt_cnt)) {
7959 7967 /*
7960 7968 * oh oh -- another link reset or target change
7961 7969 * must have occurred while we are in here
7962 7970 */
7963 7971 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_23);
7964 7972
7965 7973 return (0);
7966 7974 } else {
7967 7975 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_24);
7968 7976 }
7969 7977
7970 7978 mutex_enter(&ptgt->tgt_mutex);
7971 7979
7972 7980 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
7973 7981 /*
7974 7982 * tgt is not offline -- is it marked (i.e. needs
7975 7983 * to be offlined) ??
7976 7984 */
7977 7985 if (ptgt->tgt_state & FCP_TGT_MARK) {
7978 7986 /*
7979 7987 * this target not offline *and*
7980 7988 * marked
7981 7989 */
7982 7990 ptgt->tgt_state &= ~FCP_TGT_MARK;
7983 7991 rval = fcp_offline_target(pptr, ptgt, link_cnt,
7984 7992 tgt_cnt, 0, 0);
7985 7993 } else {
7986 7994 ptgt->tgt_state &= ~FCP_TGT_BUSY;
7987 7995
7988 7996 /* create the LUNs */
7989 7997 if (ptgt->tgt_node_state != FCP_TGT_NODE_ON_DEMAND) {
7990 7998 ptgt->tgt_node_state = FCP_TGT_NODE_PRESENT;
7991 7999 fcp_create_luns(ptgt, link_cnt, tgt_cnt,
7992 8000 cause);
7993 8001 ptgt->tgt_device_created = 1;
7994 8002 } else {
7995 8003 fcp_update_tgt_state(ptgt, FCP_RESET,
7996 8004 FCP_LUN_BUSY);
7997 8005 }
7998 8006 }
7999 8007 }
8000 8008
8001 8009 mutex_exit(&ptgt->tgt_mutex);
8002 8010
8003 8011 return (rval);
8004 8012 }
8005 8013
8006 8014
8007 8015 /*
8008 8016 * this routine is called to finish port initialization
8009 8017 *
8010 8018 * Each port has a "temp" counter -- when a state change happens (e.g.
8011 8019 * port online), the temp count is set to the number of devices in the map.
8012 8020 * Then, as each device gets "discovered", the temp counter is decremented
8013 8021 * by one. When this count reaches zero we know that all of the devices
8014 8022 * in the map have been discovered (or an error has occurred), so we can
8015 8023 * then finish initialization -- which is done by this routine (well, this
8016 8024  * and fcp_finish_tgt())
8017 8025 *
8018 8026 * acquires and releases the global mutex
8019 8027 *
8020 8028 * called with the port mutex owned
8021 8029 */
8022 8030 static void
8023 8031 fcp_finish_init(struct fcp_port *pptr)
8024 8032 {
8025 8033 #ifdef DEBUG
8026 8034 bzero(pptr->port_finish_stack, sizeof (pptr->port_finish_stack));
8027 8035 pptr->port_finish_depth = getpcstack(pptr->port_finish_stack,
8028 8036 FCP_STACK_DEPTH);
8029 8037 #endif /* DEBUG */
8030 8038
8031 8039 ASSERT(mutex_owned(&pptr->port_mutex));
8032 8040
8033 8041 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8034 8042 fcp_trace, FCP_BUF_LEVEL_2, 0, "finish_init:"
8035 8043 " entering; ipkt count=%d", pptr->port_ipkt_cnt);
8036 8044
8037 8045 if ((pptr->port_state & FCP_STATE_ONLINING) &&
8038 8046 !(pptr->port_state & (FCP_STATE_SUSPENDED |
8039 8047 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
8040 8048 pptr->port_state &= ~FCP_STATE_ONLINING;
8041 8049 pptr->port_state |= FCP_STATE_ONLINE;
8042 8050 }
8043 8051
8044 8052 /* Wake up threads waiting on config done */
8045 8053 cv_broadcast(&pptr->port_config_cv);
8046 8054 }
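
The "temp" counter protocol described in the comment above is spread across several routines that are not all visible in this hunk (the decrement itself happens inside fcp_call_finish_init(), and the per-target analog tgt_tmp_cnt is set in fcp_handle_reportlun() above). A simplified sketch of the pattern, with "port_tmp_cnt" and "ndevs" as assumed names:

	/* On a state change, e.g. port online: */
	mutex_enter(&pptr->port_mutex);
	pptr->port_tmp_cnt = ndevs;		/* devices left to discover */
	mutex_exit(&pptr->port_mutex);

	/* Once per device discovered (or given up on): */
	mutex_enter(&pptr->port_mutex);
	if (--pptr->port_tmp_cnt == 0) {
		/* every device accounted for; wrap up the port */
		fcp_finish_init(pptr);		/* called with the mutex owned */
	}
	mutex_exit(&pptr->port_mutex);
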
8047 8055
8048 8056
8049 8057 /*
8050 8058 * called from fcp_finish_init to create the LUNs for a target
8051 8059 *
8052 8060 * called with the port mutex owned
8053 8061 */
8054 8062 static void
8055 8063 fcp_create_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt, int cause)
8056 8064 {
8057 8065 struct fcp_lun *plun;
8058 8066 struct fcp_port *pptr;
8059 8067 child_info_t *cip = NULL;
8060 8068
8061 8069 ASSERT(ptgt != NULL);
8062 8070 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8063 8071
8064 8072 pptr = ptgt->tgt_port;
8065 8073
8066 8074 ASSERT(pptr != NULL);
8067 8075
8068 8076 /* scan all LUNs for this target */
8069 8077 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8070 8078 if (plun->lun_state & FCP_LUN_OFFLINE) {
8071 8079 continue;
8072 8080 }
8073 8081
8074 8082 if (plun->lun_state & FCP_LUN_MARK) {
8075 8083 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8076 8084 fcp_trace, FCP_BUF_LEVEL_2, 0,
8077 8085 "fcp_create_luns: offlining marked LUN!");
8078 8086 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, 0);
8079 8087 continue;
8080 8088 }
8081 8089
8082 8090 plun->lun_state &= ~FCP_LUN_BUSY;
8083 8091
8084 8092 /*
8085 8093 		 * There are conditions in which the FCP_LUN_INIT flag is cleared
8086 8094 		 * but we still have a valid plun->lun_cip. To cover this case,
8087 8095 		 * also CLEAR_BUSY whenever we have a valid lun_cip.
8088 8096 */
8089 8097 if (plun->lun_mpxio && plun->lun_cip &&
8090 8098 (!fcp_pass_to_hp(pptr, plun, plun->lun_cip,
8091 8099 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8092 8100 0, 0))) {
8093 8101 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8094 8102 fcp_trace, FCP_BUF_LEVEL_2, 0,
8095 8103 "fcp_create_luns: enable lun %p failed!",
8096 8104 plun);
8097 8105 }
8098 8106
8099 8107 if (plun->lun_state & FCP_LUN_INIT &&
8100 8108 !(plun->lun_state & FCP_LUN_CHANGED)) {
8101 8109 continue;
8102 8110 }
8103 8111
8104 8112 if (cause == FCP_CAUSE_USER_CREATE) {
8105 8113 continue;
8106 8114 }
8107 8115
8108 8116 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8109 8117 fcp_trace, FCP_BUF_LEVEL_6, 0,
8110 8118 "create_luns: passing ONLINE elem to HP thread");
8111 8119
8112 8120 /*
8113 8121 * If lun has changed, prepare for offlining the old path.
8114 8122 * Do not offline the old path right now, since it may be
8115 8123 * still opened.
8116 8124 */
8117 8125 if (plun->lun_cip && (plun->lun_state & FCP_LUN_CHANGED)) {
8118 8126 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8119 8127 }
8120 8128
8121 8129 /* pass an ONLINE element to the hotplug thread */
8122 8130 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8123 8131 link_cnt, tgt_cnt, NDI_ONLINE_ATTACH, 0)) {
8124 8132
8125 8133 /*
8126 8134 			 * We cannot attach synchronously (i.e. pass
8127 8135 			 * NDI_ONLINE_ATTACH) here, as we might be
8128 8136 * coming from an interrupt or callback
8129 8137 * thread.
8130 8138 */
8131 8139 if (!fcp_pass_to_hp(pptr, plun, cip, FCP_ONLINE,
8132 8140 link_cnt, tgt_cnt, 0, 0)) {
8133 8141 fcp_log(CE_CONT, pptr->port_dip,
8134 8142 "Can not ONLINE LUN; D_ID=%x, LUN=%x\n",
8135 8143 plun->lun_tgt->tgt_d_id, plun->lun_num);
8136 8144 }
8137 8145 }
8138 8146 }
8139 8147 }
8140 8148
8141 8149
8142 8150 /*
8143 8151 * function to online/offline devices
8144 8152 */
8145 8153 static int
8146 8154 fcp_trigger_lun(struct fcp_lun *plun, child_info_t *cip, int old_mpxio,
8147 8155 int online, int lcount, int tcount, int flags)
8148 8156 {
8149 8157 int rval = NDI_FAILURE;
8150 8158 int circ;
8151 8159 child_info_t *ccip;
8152 8160 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8153 8161 int is_mpxio = pptr->port_mpxio;
8154 - dev_info_t *cdip, *pdip;
8155 - char *devname;
8156 8162
8157 8163 if ((old_mpxio != 0) && (plun->lun_mpxio != old_mpxio)) {
8158 8164 /*
8159 8165 * When this event gets serviced, lun_cip and lun_mpxio
8160 8166 		 * have changed, so this event should be invalidated now.
8161 8167 */
8162 8168 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
8163 8169 FCP_BUF_LEVEL_2, 0, "fcp_trigger_lun: lun_mpxio changed: "
8164 8170 "plun: %p, cip: %p, what:%d", plun, cip, online);
8165 8171 return (rval);
8166 8172 }
8167 8173
8168 8174 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8169 8175 fcp_trace, FCP_BUF_LEVEL_2, 0,
8170 8176 "fcp_trigger_lun: plun=%p target=%x lun=%d cip=%p what=%x "
8171 8177 "flags=%x mpxio=%x\n",
8172 8178 plun, LUN_TGT->tgt_d_id, plun->lun_num, cip, online, flags,
8173 8179 plun->lun_mpxio);
8174 8180
8175 8181 /*
8176 8182 * lun_mpxio needs checking here because we can end up in a race
8177 8183 * condition where this task has been dispatched while lun_mpxio is
8178 8184 * set, but an earlier FCP_ONLINE task for the same LUN tried to
8179 8185 * enable MPXIO for the LUN, but was unable to, and hence cleared
8180 8186 * the flag. We rely on the serialization of the tasks here. We return
8181 8187 * NDI_SUCCESS so any callers continue without reporting spurious
8182 8188 	 * errors, and they still think we're an MPXIO LUN.
8183 8189 */
8184 8190
8185 8191 if (online == FCP_MPXIO_PATH_CLEAR_BUSY ||
8186 8192 online == FCP_MPXIO_PATH_SET_BUSY) {
8187 8193 if (plun->lun_mpxio) {
8188 8194 rval = fcp_update_mpxio_path(plun, cip, online);
8189 8195 } else {
8190 8196 rval = NDI_SUCCESS;
8191 8197 }
8192 8198 return (rval);
8193 8199 }
8194 8200
8195 - /*
8196 - * Explicit devfs_clean() due to ndi_devi_offline() not
8197 - * executing devfs_clean() if parent lock is held.
8198 - */
8199 - ASSERT(!servicing_interrupt());
8200 - if (online == FCP_OFFLINE) {
8201 - if (plun->lun_mpxio == 0) {
8202 - if (plun->lun_cip == cip) {
8203 - cdip = DIP(plun->lun_cip);
8204 - } else {
8205 - cdip = DIP(cip);
8206 - }
8207 - } else if ((plun->lun_cip == cip) && plun->lun_cip) {
8208 - cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8209 - } else if ((plun->lun_cip != cip) && cip) {
8210 - /*
8211 - * This means a DTYPE/GUID change, we shall get the
8212 - * dip of the old cip instead of the current lun_cip.
8213 - */
8214 - cdip = mdi_pi_get_client(PIP(cip));
8215 - }
8216 - if (cdip) {
8217 - if (i_ddi_devi_attached(cdip)) {
8218 - pdip = ddi_get_parent(cdip);
8219 - devname = kmem_alloc(MAXNAMELEN + 1, KM_SLEEP);
8220 - ndi_devi_enter(pdip, &circ);
8221 - (void) ddi_deviname(cdip, devname);
8222 - /*
8223 - * Release parent lock before calling
8224 - * devfs_clean().
8225 - */
8226 - ndi_devi_exit(pdip, circ);
8227 - (void) devfs_clean(pdip, devname + 1,
8228 - DV_CLEAN_FORCE);
8229 - kmem_free(devname, MAXNAMELEN + 1);
8230 - }
8231 - }
8232 - }
8233 -
8234 8201 if (fc_ulp_busy_port(pptr->port_fp_handle) != 0) {
8235 8202 return (NDI_FAILURE);
8236 8203 }
8237 8204
8238 8205 if (is_mpxio) {
8239 8206 mdi_devi_enter(pptr->port_dip, &circ);
8240 8207 } else {
8241 8208 ndi_devi_enter(pptr->port_dip, &circ);
8242 8209 }
8243 8210
8244 8211 mutex_enter(&pptr->port_mutex);
8245 8212 mutex_enter(&plun->lun_mutex);
8246 8213
8247 8214 if (online == FCP_ONLINE) {
8248 8215 ccip = fcp_get_cip(plun, cip, lcount, tcount);
8249 8216 if (ccip == NULL) {
8250 8217 goto fail;
8251 8218 }
8252 8219 } else {
8253 8220 if (fcp_is_child_present(plun, cip) != FC_SUCCESS) {
8254 8221 goto fail;
8255 8222 }
8256 8223 ccip = cip;
8257 8224 }
8258 8225
8259 8226 if (online == FCP_ONLINE) {
8260 8227 rval = fcp_online_child(plun, ccip, lcount, tcount, flags,
8261 8228 &circ);
8262 8229 fc_ulp_log_device_event(pptr->port_fp_handle,
8263 8230 FC_ULP_DEVICE_ONLINE);
8264 8231 } else {
8265 8232 rval = fcp_offline_child(plun, ccip, lcount, tcount, flags,
8266 8233 &circ);
8267 8234 fc_ulp_log_device_event(pptr->port_fp_handle,
8268 8235 FC_ULP_DEVICE_OFFLINE);
8269 8236 }
8270 8237
8271 8238 fail: mutex_exit(&plun->lun_mutex);
8272 8239 mutex_exit(&pptr->port_mutex);
8273 8240
8274 8241 if (is_mpxio) {
8275 8242 mdi_devi_exit(pptr->port_dip, circ);
8276 8243 } else {
8277 8244 ndi_devi_exit(pptr->port_dip, circ);
8278 8245 }
8279 8246
8280 8247 fc_ulp_idle_port(pptr->port_fp_handle);
8281 8248
8282 8249 return (rval);
8283 8250 }
8284 8251
8285 8252
8286 8253 /*
8287 8254 * take a target offline by taking all of its LUNs offline
8288 8255 */
8289 8256 /*ARGSUSED*/
8290 8257 static int
8291 8258 fcp_offline_target(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8292 8259 int link_cnt, int tgt_cnt, int nowait, int flags)
8293 8260 {
8294 8261 struct fcp_tgt_elem *elem;
8295 8262
8296 8263 ASSERT(mutex_owned(&pptr->port_mutex));
8297 8264 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8298 8265
8299 8266 ASSERT(!(ptgt->tgt_state & FCP_TGT_OFFLINE));
8300 8267
8301 8268 if (link_cnt != pptr->port_link_cnt || (tgt_cnt && tgt_cnt !=
8302 8269 ptgt->tgt_change_cnt)) {
8303 8270 mutex_exit(&ptgt->tgt_mutex);
8304 8271 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_25);
8305 8272 mutex_enter(&ptgt->tgt_mutex);
8306 8273
8307 8274 return (0);
8308 8275 }
8309 8276
8310 8277 ptgt->tgt_pd_handle = NULL;
8311 8278 mutex_exit(&ptgt->tgt_mutex);
8312 8279 FCP_TGT_TRACE(ptgt, tgt_cnt, FCP_TGT_TRACE_26);
8313 8280 mutex_enter(&ptgt->tgt_mutex);
8314 8281
8315 8282 tgt_cnt = tgt_cnt ? tgt_cnt : ptgt->tgt_change_cnt;
8316 8283
8317 8284 if (ptgt->tgt_tcap &&
8318 8285 (elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8319 8286 elem->flags = flags;
8320 8287 elem->time = fcp_watchdog_time;
8321 8288 if (nowait == 0) {
8322 8289 elem->time += fcp_offline_delay;
8323 8290 }
8324 8291 elem->ptgt = ptgt;
8325 8292 elem->link_cnt = link_cnt;
8326 8293 elem->tgt_cnt = tgt_cnt;
8327 8294 elem->next = pptr->port_offline_tgts;
8328 8295 pptr->port_offline_tgts = elem;
8329 8296 } else {
8330 8297 fcp_offline_target_now(pptr, ptgt, link_cnt, tgt_cnt, flags);
8331 8298 }
8332 8299
8333 8300 return (1);
8334 8301 }
8335 8302
8336 8303
8337 8304 static void
8338 8305 fcp_offline_target_now(struct fcp_port *pptr, struct fcp_tgt *ptgt,
8339 8306 int link_cnt, int tgt_cnt, int flags)
8340 8307 {
8341 8308 ASSERT(mutex_owned(&pptr->port_mutex));
8342 8309 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8343 8310
8344 8311 fc_ulp_enable_relogin(pptr->port_fp_handle, &ptgt->tgt_port_wwn);
8345 8312 ptgt->tgt_state = FCP_TGT_OFFLINE;
8346 8313 ptgt->tgt_pd_handle = NULL;
8347 8314 fcp_offline_tgt_luns(ptgt, link_cnt, tgt_cnt, flags);
8348 8315 }
8349 8316
8350 8317
8351 8318 static void
8352 8319 fcp_offline_tgt_luns(struct fcp_tgt *ptgt, int link_cnt, int tgt_cnt,
8353 8320 int flags)
8354 8321 {
8355 8322 struct fcp_lun *plun;
8356 8323
8357 8324 ASSERT(mutex_owned(&ptgt->tgt_port->port_mutex));
8358 8325 ASSERT(mutex_owned(&ptgt->tgt_mutex));
8359 8326
8360 8327 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
8361 8328 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
8362 8329 fcp_offline_lun(plun, link_cnt, tgt_cnt, 1, flags);
8363 8330 }
8364 8331 }
8365 8332 }
8366 8333
8367 8334
8368 8335 /*
8369 8336 * take a LUN offline
8370 8337 *
8371 8338 * enters and leaves with the target mutex held, releasing it in the process
8372 8339 *
8373 8340 * allocates memory in non-sleep mode
8374 8341 */
8375 8342 static void
8376 8343 fcp_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8377 8344 int nowait, int flags)
8378 8345 {
8379 8346 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
8380 8347 struct fcp_lun_elem *elem;
8381 8348
8382 8349 ASSERT(plun != NULL);
8383 8350 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8384 8351
8385 8352 if (nowait) {
8386 8353 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8387 8354 return;
8388 8355 }
8389 8356
8390 8357 if ((elem = kmem_zalloc(sizeof (*elem), KM_NOSLEEP)) != NULL) {
8391 8358 elem->flags = flags;
8392 8359 elem->time = fcp_watchdog_time;
8393 8360 if (nowait == 0) {
8394 8361 elem->time += fcp_offline_delay;
8395 8362 }
8396 8363 elem->plun = plun;
8397 8364 elem->link_cnt = link_cnt;
8398 8365 elem->tgt_cnt = plun->lun_tgt->tgt_change_cnt;
8399 8366 elem->next = pptr->port_offline_luns;
8400 8367 pptr->port_offline_luns = elem;
8401 8368 } else {
8402 8369 fcp_offline_lun_now(plun, link_cnt, tgt_cnt, flags);
8403 8370 }
8404 8371 }
8405 8372
8406 8373
8407 8374 static void
8408 8375 fcp_prepare_offline_lun(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
8409 8376 {
8410 8377 struct fcp_pkt *head = NULL;
8411 8378
8412 8379 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8413 8380
8414 8381 mutex_exit(&LUN_TGT->tgt_mutex);
8415 8382
8416 8383 head = fcp_scan_commands(plun);
8417 8384 if (head != NULL) {
8418 8385 fcp_abort_commands(head, LUN_PORT);
8419 8386 }
8420 8387
8421 8388 mutex_enter(&LUN_TGT->tgt_mutex);
8422 8389
8423 8390 if (plun->lun_cip && plun->lun_mpxio) {
8424 8391 /*
8425 8392 * Intimate MPxIO that the lun busy is cleared
8426 8393 */
8427 8394 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip,
8428 8395 FCP_MPXIO_PATH_CLEAR_BUSY, link_cnt, tgt_cnt,
8429 8396 0, 0)) {
8430 8397 fcp_log(CE_NOTE, LUN_PORT->port_dip,
8431 8398 "Can not ENABLE LUN; D_ID=%x, LUN=%x",
8432 8399 LUN_TGT->tgt_d_id, plun->lun_num);
8433 8400 }
8434 8401 /*
8435 8402 * Intimate MPxIO that the lun is now marked for offline
8436 8403 */
8437 8404 mutex_exit(&LUN_TGT->tgt_mutex);
8438 8405 (void) mdi_pi_disable_path(PIP(plun->lun_cip), DRIVER_DISABLE);
8439 8406 mutex_enter(&LUN_TGT->tgt_mutex);
8440 8407 }
8441 8408 }
8442 8409
8443 8410 static void
8444 8411 fcp_offline_lun_now(struct fcp_lun *plun, int link_cnt, int tgt_cnt,
8445 8412 int flags)
8446 8413 {
8447 8414 ASSERT(mutex_owned(&LUN_TGT->tgt_mutex));
8448 8415
8449 8416 mutex_exit(&LUN_TGT->tgt_mutex);
8450 8417 fcp_update_offline_flags(plun);
8451 8418 mutex_enter(&LUN_TGT->tgt_mutex);
8452 8419
8453 8420 fcp_prepare_offline_lun(plun, link_cnt, tgt_cnt);
8454 8421
8455 8422 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
8456 8423 fcp_trace, FCP_BUF_LEVEL_4, 0,
8457 8424 "offline_lun: passing OFFLINE elem to HP thread");
8458 8425
8459 8426 if (plun->lun_cip) {
8460 8427 fcp_log(CE_NOTE, LUN_PORT->port_dip,
8461 8428 "!offlining lun=%x (trace=%x), target=%x (trace=%x)",
8462 8429 plun->lun_num, plun->lun_trace, LUN_TGT->tgt_d_id,
8463 8430 LUN_TGT->tgt_trace);
8464 8431
8465 8432 if (!fcp_pass_to_hp(LUN_PORT, plun, plun->lun_cip, FCP_OFFLINE,
8466 8433 link_cnt, tgt_cnt, flags, 0)) {
8467 8434 fcp_log(CE_CONT, LUN_PORT->port_dip,
8468 8435 "Can not OFFLINE LUN; D_ID=%x, LUN=%x\n",
8469 8436 LUN_TGT->tgt_d_id, plun->lun_num);
8470 8437 }
8471 8438 }
8472 8439 }
8473 8440
8474 8441 static void
8475 8442 fcp_scan_offline_luns(struct fcp_port *pptr)
8476 8443 {
8477 8444 struct fcp_lun_elem *elem;
8478 8445 struct fcp_lun_elem *prev;
8479 8446 struct fcp_lun_elem *next;
8480 8447
8481 8448 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8482 8449
8483 8450 prev = NULL;
8484 8451 elem = pptr->port_offline_luns;
8485 8452 while (elem) {
8486 8453 next = elem->next;
8487 8454 if (elem->time <= fcp_watchdog_time) {
8488 8455 int changed = 1;
8489 8456 struct fcp_tgt *ptgt = elem->plun->lun_tgt;
8490 8457
8491 8458 mutex_enter(&ptgt->tgt_mutex);
8492 8459 if (pptr->port_link_cnt == elem->link_cnt &&
8493 8460 ptgt->tgt_change_cnt == elem->tgt_cnt) {
8494 8461 changed = 0;
8495 8462 }
8496 8463
8497 8464 if (!changed &&
8498 8465 !(elem->plun->lun_state & FCP_TGT_OFFLINE)) {
8499 8466 fcp_offline_lun_now(elem->plun,
8500 8467 elem->link_cnt, elem->tgt_cnt, elem->flags);
8501 8468 }
8502 8469 mutex_exit(&ptgt->tgt_mutex);
8503 8470
8504 8471 kmem_free(elem, sizeof (*elem));
8505 8472
8506 8473 if (prev) {
8507 8474 prev->next = next;
8508 8475 } else {
8509 8476 pptr->port_offline_luns = next;
8510 8477 }
8511 8478 } else {
8512 8479 prev = elem;
8513 8480 }
8514 8481 elem = next;
8515 8482 }
8516 8483 }
8517 8484
8518 8485
8519 8486 static void
8520 8487 fcp_scan_offline_tgts(struct fcp_port *pptr)
8521 8488 {
8522 8489 struct fcp_tgt_elem *elem;
8523 8490 struct fcp_tgt_elem *prev;
8524 8491 struct fcp_tgt_elem *next;
8525 8492
8526 8493 ASSERT(MUTEX_HELD(&pptr->port_mutex));
8527 8494
8528 8495 prev = NULL;
8529 8496 elem = pptr->port_offline_tgts;
8530 8497 while (elem) {
8531 8498 next = elem->next;
8532 8499 if (elem->time <= fcp_watchdog_time) {
8533 8500 int outdated = 1;
8534 8501 struct fcp_tgt *ptgt = elem->ptgt;
8535 8502
8536 8503 mutex_enter(&ptgt->tgt_mutex);
8537 8504
8538 8505 if (ptgt->tgt_change_cnt == elem->tgt_cnt) {
8539 8506 /* No change on tgt since elem was created. */
8540 8507 outdated = 0;
8541 8508 } else if (ptgt->tgt_change_cnt == elem->tgt_cnt + 1 &&
8542 8509 pptr->port_link_cnt == elem->link_cnt + 1 &&
8543 8510 ptgt->tgt_statec_cause == FCP_CAUSE_LINK_DOWN) {
8544 8511 /*
8545 8512 * Exactly one thing happened to the target
8546 8513 * in between: the local port went offline.
8547 8514 * For fp the remote port is already gone so
8548 8515 * it will not tell us again to offline the
8549 8516 * target. We must offline it now.
8550 8517 */
8551 8518 outdated = 0;
8552 8519 }
8553 8520
8554 8521 if (!outdated && !(ptgt->tgt_state &
8555 8522 FCP_TGT_OFFLINE)) {
8556 8523 fcp_offline_target_now(pptr,
8557 8524 ptgt, elem->link_cnt, elem->tgt_cnt,
8558 8525 elem->flags);
8559 8526 }
8560 8527
8561 8528 mutex_exit(&ptgt->tgt_mutex);
8562 8529
8563 8530 kmem_free(elem, sizeof (*elem));
8564 8531
8565 8532 if (prev) {
8566 8533 prev->next = next;
8567 8534 } else {
8568 8535 pptr->port_offline_tgts = next;
8569 8536 }
8570 8537 } else {
8571 8538 prev = elem;
8572 8539 }
8573 8540 elem = next;
8574 8541 }
8575 8542 }
8576 8543
8577 8544
8578 8545 static void
8579 8546 fcp_update_offline_flags(struct fcp_lun *plun)
8580 8547 {
8581 8548 struct fcp_port *pptr = LUN_PORT;
8582 8549 ASSERT(plun != NULL);
8583 8550
8584 8551 mutex_enter(&LUN_TGT->tgt_mutex);
8585 8552 plun->lun_state |= FCP_LUN_OFFLINE;
8586 8553 plun->lun_state &= ~(FCP_LUN_INIT | FCP_LUN_BUSY | FCP_LUN_MARK);
8587 8554
8588 8555 mutex_enter(&plun->lun_mutex);
8589 8556 if (plun->lun_cip && plun->lun_state & FCP_SCSI_LUN_TGT_INIT) {
8590 8557 dev_info_t *cdip = NULL;
8591 8558
8592 8559 mutex_exit(&LUN_TGT->tgt_mutex);
8593 8560
8594 8561 if (plun->lun_mpxio == 0) {
8595 8562 cdip = DIP(plun->lun_cip);
8596 8563 } else if (plun->lun_cip) {
8597 8564 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
8598 8565 }
8599 8566
8600 8567 mutex_exit(&plun->lun_mutex);
8601 8568 if (cdip) {
8602 8569 (void) ndi_event_retrieve_cookie(
8603 8570 pptr->port_ndi_event_hdl, cdip, FCAL_REMOVE_EVENT,
8604 8571 &fcp_remove_eid, NDI_EVENT_NOPASS);
8605 8572 (void) ndi_event_run_callbacks(
8606 8573 pptr->port_ndi_event_hdl, cdip,
8607 8574 fcp_remove_eid, NULL);
8608 8575 }
8609 8576 } else {
8610 8577 mutex_exit(&plun->lun_mutex);
8611 8578 mutex_exit(&LUN_TGT->tgt_mutex);
8612 8579 }
8613 8580 }
8614 8581
8615 8582
8616 8583 /*
8617 8584 * Scan all of the command pkts for this port, moving pkts that
8618 8585 * match our LUN onto our own list (headed by "head")
8619 8586 */
8620 8587 static struct fcp_pkt *
8621 8588 fcp_scan_commands(struct fcp_lun *plun)
8622 8589 {
8623 8590 struct fcp_port *pptr = LUN_PORT;
8624 8591
8625 8592 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8626 8593 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8627 8594 struct fcp_pkt *pcmd = NULL; /* the previous command */
8628 8595
8629 8596 struct fcp_pkt *head = NULL; /* head of our list */
8630 8597 struct fcp_pkt *tail = NULL; /* tail of our list */
8631 8598
8632 8599 int cmds_found = 0;
8633 8600
8634 8601 mutex_enter(&pptr->port_pkt_mutex);
8635 8602 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
8636 8603 struct fcp_lun *tlun =
8637 8604 ADDR2LUN(&cmd->cmd_pkt->pkt_address);
8638 8605
8639 8606 ncmd = cmd->cmd_next; /* set next command */
8640 8607
8641 8608 /*
8642 8609 * if this pkt is for a different LUN or the
8643 8610 * command is sent down, skip it.
8644 8611 */
8645 8612 if (tlun != plun || cmd->cmd_state == FCP_PKT_ISSUED ||
8646 8613 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR)) {
8647 8614 pcmd = cmd;
8648 8615 continue;
8649 8616 }
8650 8617 cmds_found++;
8651 8618 if (pcmd != NULL) {
8652 8619 ASSERT(pptr->port_pkt_head != cmd);
8653 8620 pcmd->cmd_next = cmd->cmd_next;
8654 8621 } else {
8655 8622 ASSERT(cmd == pptr->port_pkt_head);
8656 8623 pptr->port_pkt_head = cmd->cmd_next;
8657 8624 }
8658 8625
8659 8626 if (cmd == pptr->port_pkt_tail) {
8660 8627 pptr->port_pkt_tail = pcmd;
8661 8628 if (pcmd) {
8662 8629 pcmd->cmd_next = NULL;
8663 8630 }
8664 8631 }
8665 8632
8666 8633 if (head == NULL) {
8667 8634 head = tail = cmd;
8668 8635 } else {
8669 8636 ASSERT(tail != NULL);
8670 8637
8671 8638 tail->cmd_next = cmd;
8672 8639 tail = cmd;
8673 8640 }
8674 8641 cmd->cmd_next = NULL;
8675 8642 }
8676 8643 mutex_exit(&pptr->port_pkt_mutex);
8677 8644
8678 8645 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8679 8646 fcp_trace, FCP_BUF_LEVEL_8, 0,
8680 8647 "scan commands: %d cmd(s) found", cmds_found);
8681 8648
8682 8649 return (head);
8683 8650 }
8684 8651
8685 8652
8686 8653 /*
8687 8654 * Abort all the commands in the command queue
8688 8655 */
8689 8656 static void
8690 8657 fcp_abort_commands(struct fcp_pkt *head, struct fcp_port *pptr)
8691 8658 {
8692 8659 struct fcp_pkt *cmd = NULL; /* pkt cmd ptr */
8693 8660 struct fcp_pkt *ncmd = NULL; /* next pkt ptr */
8694 8661
8695 8662 ASSERT(mutex_owned(&pptr->port_mutex));
8696 8663
8697 8664 /* scan through the pkts and invalidate them */
8698 8665 for (cmd = head; cmd != NULL; cmd = ncmd) {
8699 8666 struct scsi_pkt *pkt = cmd->cmd_pkt;
8700 8667
8701 8668 ncmd = cmd->cmd_next;
8702 8669 ASSERT(pkt != NULL);
8703 8670
8704 8671 /*
8705 8672 * The lun is going to be marked offline. Indicate to
8706 8673 * the target driver not to requeue or retry this command
8707 8674 * as the device is going to be offlined pretty soon.
8708 8675 */
8709 8676 pkt->pkt_reason = CMD_DEV_GONE;
8710 8677 pkt->pkt_statistics = 0;
8711 8678 pkt->pkt_state = 0;
8712 8679
8713 8680 /* reset cmd flags/state */
8714 8681 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
8715 8682 cmd->cmd_state = FCP_PKT_IDLE;
8716 8683
8717 8684 /*
8718 8685 * ensure we have a packet completion routine,
8719 8686 * then call it.
8720 8687 */
8721 8688 ASSERT(pkt->pkt_comp != NULL);
8722 8689
8723 8690 mutex_exit(&pptr->port_mutex);
8724 8691 fcp_post_callback(cmd);
8725 8692 mutex_enter(&pptr->port_mutex);
8726 8693 }
8727 8694 }
8728 8695
8729 8696
8730 8697 /*
8731 8698 * the pkt_comp callback for command packets
8732 8699 */
8733 8700 static void
8734 8701 fcp_cmd_callback(fc_packet_t *fpkt)
8735 8702 {
8736 8703 struct fcp_pkt *cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
8737 8704 struct scsi_pkt *pkt = cmd->cmd_pkt;
8738 8705 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8739 8706
8740 8707 ASSERT(cmd->cmd_state != FCP_PKT_IDLE);
8741 8708
8742 8709 if (cmd->cmd_state == FCP_PKT_IDLE) {
8743 8710 cmn_err(CE_PANIC, "Packet already completed %p",
8744 8711 (void *)cmd);
8745 8712 }
8746 8713
8747 8714 /*
8748 8715 * Watch thread should be freeing the packet, ignore the pkt.
8749 8716 */
8750 8717 if (cmd->cmd_state == FCP_PKT_ABORTING) {
8751 8718 fcp_log(CE_CONT, pptr->port_dip,
8752 8719 "!FCP: Pkt completed while aborting\n");
8753 8720 return;
8754 8721 }
8755 8722 cmd->cmd_state = FCP_PKT_IDLE;
8756 8723
8757 8724 fcp_complete_pkt(fpkt);
8758 8725
8759 8726 #ifdef DEBUG
8760 8727 mutex_enter(&pptr->port_pkt_mutex);
8761 8728 pptr->port_npkts--;
8762 8729 mutex_exit(&pptr->port_pkt_mutex);
8763 8730 #endif /* DEBUG */
8764 8731
8765 8732 fcp_post_callback(cmd);
8766 8733 }
8767 8734
8768 8735
8769 8736 static void
8770 8737 fcp_complete_pkt(fc_packet_t *fpkt)
8771 8738 {
8772 8739 int error = 0;
8773 8740 struct fcp_pkt *cmd = (struct fcp_pkt *)
8774 8741 fpkt->pkt_ulp_private;
8775 8742 struct scsi_pkt *pkt = cmd->cmd_pkt;
8776 8743 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
8777 8744 struct fcp_lun *plun;
8778 8745 struct fcp_tgt *ptgt;
8779 8746 struct fcp_rsp *rsp;
8780 8747 struct scsi_address save;
8781 8748
8782 8749 #ifdef DEBUG
8783 8750 save = pkt->pkt_address;
8784 8751 #endif /* DEBUG */
8785 8752
8786 8753 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
8787 8754
8788 8755 if (fpkt->pkt_state == FC_PKT_SUCCESS) {
8789 8756 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8790 8757 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
8791 8758 sizeof (struct fcp_rsp));
8792 8759 }
8793 8760
8794 8761 pkt->pkt_state = STATE_GOT_BUS | STATE_GOT_TARGET |
8795 8762 STATE_SENT_CMD | STATE_GOT_STATUS;
8796 8763
8797 8764 pkt->pkt_resid = 0;
8798 8765
8799 8766 if (fpkt->pkt_datalen) {
8800 8767 pkt->pkt_state |= STATE_XFERRED_DATA;
8801 8768 if (fpkt->pkt_data_resid) {
8802 8769 error++;
8803 8770 }
8804 8771 }
8805 8772
8806 8773 if ((pkt->pkt_scbp != NULL) && ((*(pkt->pkt_scbp) =
8807 8774 rsp->fcp_u.fcp_status.scsi_status) != STATUS_GOOD)) {
8808 8775 /*
8809 8776 * The next two checks make sure that if there
8810 8777 * is no sense data or a valid response and
8811 8778 * the command came back with check condition,
8812 8779 * the command should be retried.
8813 8780 */
8814 8781 if (!rsp->fcp_u.fcp_status.rsp_len_set &&
8815 8782 !rsp->fcp_u.fcp_status.sense_len_set) {
8816 8783 pkt->pkt_state &= ~STATE_XFERRED_DATA;
8817 8784 pkt->pkt_resid = cmd->cmd_dmacount;
8818 8785 }
8819 8786 }
8820 8787
8821 8788 if ((error | rsp->fcp_u.i_fcp_status | rsp->fcp_resid) == 0) {
8822 8789 return;
8823 8790 }
8824 8791
8825 8792 plun = ADDR2LUN(&pkt->pkt_address);
8826 8793 ptgt = plun->lun_tgt;
8827 8794 ASSERT(ptgt != NULL);
8828 8795
8829 8796 /*
8830 8797 * Update the transfer resid, if appropriate
8831 8798 */
8832 8799 if (rsp->fcp_u.fcp_status.resid_over ||
8833 8800 rsp->fcp_u.fcp_status.resid_under) {
8834 8801 pkt->pkt_resid = rsp->fcp_resid;
8835 8802 }
8836 8803
8837 8804 /*
8838 8805 * First see if we got a FCP protocol error.
8839 8806 */
8840 8807 if (rsp->fcp_u.fcp_status.rsp_len_set) {
8841 8808 struct fcp_rsp_info *bep;
8842 8809 bep = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
8843 8810 sizeof (struct fcp_rsp));
8844 8811
8845 8812 if (fcp_validate_fcp_response(rsp, pptr) !=
8846 8813 FC_SUCCESS) {
8847 8814 pkt->pkt_reason = CMD_CMPLT;
8848 8815 *(pkt->pkt_scbp) = STATUS_CHECK;
8849 8816
8850 8817 fcp_log(CE_WARN, pptr->port_dip,
8851 8818 "!SCSI command to d_id=0x%x lun=0x%x"
8852 8819 " failed, Bad FCP response values:"
8853 8820 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8854 8821 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8855 8822 ptgt->tgt_d_id, plun->lun_num,
8856 8823 rsp->reserved_0, rsp->reserved_1,
8857 8824 rsp->fcp_u.fcp_status.reserved_0,
8858 8825 rsp->fcp_u.fcp_status.reserved_1,
8859 8826 rsp->fcp_response_len, rsp->fcp_sense_len);
8860 8827
8861 8828 return;
8862 8829 }
8863 8830
8864 8831 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8865 8832 FCP_CP_IN(fpkt->pkt_resp +
8866 8833 sizeof (struct fcp_rsp), bep,
8867 8834 fpkt->pkt_resp_acc,
8868 8835 sizeof (struct fcp_rsp_info));
8869 8836 }
8870 8837
8871 8838 if (bep->rsp_code != FCP_NO_FAILURE) {
8872 8839 child_info_t *cip;
8873 8840
8874 8841 pkt->pkt_reason = CMD_TRAN_ERR;
8875 8842
8876 8843 mutex_enter(&plun->lun_mutex);
8877 8844 cip = plun->lun_cip;
8878 8845 mutex_exit(&plun->lun_mutex);
8879 8846
8880 8847 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8881 8848 fcp_trace, FCP_BUF_LEVEL_2, 0,
8882 8849 "FCP response error on cmd=%p"
8883 8850 " target=0x%x, cip=%p", cmd,
8884 8851 ptgt->tgt_d_id, cip);
8885 8852 }
8886 8853 }
8887 8854
8888 8855 /*
8889 8856 * See if we got a SCSI error with sense data
8890 8857 */
8891 8858 if (rsp->fcp_u.fcp_status.sense_len_set) {
8892 8859 uchar_t rqlen;
8893 8860 caddr_t sense_from;
8894 8861 child_info_t *cip;
8895 8862 timeout_id_t tid;
8896 8863 struct scsi_arq_status *arq;
8897 8864 struct scsi_extended_sense *sense_to;
8898 8865
8899 8866 arq = (struct scsi_arq_status *)pkt->pkt_scbp;
8900 8867 sense_to = &arq->sts_sensedata;
8901 8868
8902 8869 rqlen = (uchar_t)min(rsp->fcp_sense_len,
8903 8870 sizeof (struct scsi_extended_sense));
8904 8871
8905 8872 sense_from = (caddr_t)fpkt->pkt_resp +
8906 8873 sizeof (struct fcp_rsp) + rsp->fcp_response_len;
8907 8874
8908 8875 if (fcp_validate_fcp_response(rsp, pptr) !=
8909 8876 FC_SUCCESS) {
8910 8877 pkt->pkt_reason = CMD_CMPLT;
8911 8878 *(pkt->pkt_scbp) = STATUS_CHECK;
8912 8879
8913 8880 fcp_log(CE_WARN, pptr->port_dip,
8914 8881 "!SCSI command to d_id=0x%x lun=0x%x"
8915 8882 " failed, Bad FCP response values:"
8916 8883 " rsvd1=%x, rsvd2=%x, sts-rsvd1=%x,"
8917 8884 " sts-rsvd2=%x, rsplen=%x, senselen=%x",
8918 8885 ptgt->tgt_d_id, plun->lun_num,
8919 8886 rsp->reserved_0, rsp->reserved_1,
8920 8887 rsp->fcp_u.fcp_status.reserved_0,
8921 8888 rsp->fcp_u.fcp_status.reserved_1,
8922 8889 rsp->fcp_response_len, rsp->fcp_sense_len);
8923 8890
8924 8891 return;
8925 8892 }
8926 8893
8927 8894 /*
8928 8895 * copy in sense information
8929 8896 */
8930 8897 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
8931 8898 FCP_CP_IN(sense_from, sense_to,
8932 8899 fpkt->pkt_resp_acc, rqlen);
8933 8900 } else {
8934 8901 bcopy(sense_from, sense_to, rqlen);
8935 8902 }
8936 8903
8937 8904 if ((FCP_SENSE_REPORTLUN_CHANGED(sense_to)) ||
8938 8905 (FCP_SENSE_NO_LUN(sense_to))) {
8939 8906 mutex_enter(&ptgt->tgt_mutex);
8940 8907 if (ptgt->tgt_tid == NULL) {
8941 8908 /*
8942 8909 * Kick off rediscovery
8943 8910 */
8944 8911 tid = timeout(fcp_reconfigure_luns,
8945 8912 (caddr_t)ptgt, drv_usectohz(1));
8946 8913
8947 8914 ptgt->tgt_tid = tid;
8948 8915 ptgt->tgt_state |= FCP_TGT_BUSY;
8949 8916 }
8950 8917 mutex_exit(&ptgt->tgt_mutex);
8951 8918 if (FCP_SENSE_REPORTLUN_CHANGED(sense_to)) {
8952 8919 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8953 8920 fcp_trace, FCP_BUF_LEVEL_3, 0,
8954 8921 "!FCP: Report Lun Has Changed"
8955 8922 " target=%x", ptgt->tgt_d_id);
8956 8923 } else if (FCP_SENSE_NO_LUN(sense_to)) {
8957 8924 FCP_TRACE(fcp_logq, pptr->port_instbuf,
8958 8925 fcp_trace, FCP_BUF_LEVEL_3, 0,
8959 8926 "!FCP: LU Not Supported"
8960 8927 " target=%x", ptgt->tgt_d_id);
8961 8928 }
8962 8929 }
8963 8930 ASSERT(pkt->pkt_scbp != NULL);
8964 8931
8965 8932 pkt->pkt_state |= STATE_ARQ_DONE;
8966 8933
8967 8934 arq->sts_rqpkt_resid = SENSE_LENGTH - rqlen;
8968 8935
8969 8936 *((uchar_t *)&arq->sts_rqpkt_status) = STATUS_GOOD;
8970 8937 arq->sts_rqpkt_reason = 0;
8971 8938 arq->sts_rqpkt_statistics = 0;
8972 8939
8973 8940 arq->sts_rqpkt_state = STATE_GOT_BUS |
8974 8941 STATE_GOT_TARGET | STATE_SENT_CMD |
8975 8942 STATE_GOT_STATUS | STATE_ARQ_DONE |
8976 8943 STATE_XFERRED_DATA;
8977 8944
8978 8945 mutex_enter(&plun->lun_mutex);
8979 8946 cip = plun->lun_cip;
8980 8947 mutex_exit(&plun->lun_mutex);
8981 8948
8982 8949 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
8983 8950 fcp_trace, FCP_BUF_LEVEL_8, 0,
8984 8951 "SCSI Check condition on cmd=%p target=0x%x"
8985 8952 " LUN=%p, cmd=%x SCSI status=%x, es key=%x"
8986 8953 " ASC=%x ASCQ=%x", cmd, ptgt->tgt_d_id, cip,
8987 8954 cmd->cmd_fcp_cmd.fcp_cdb[0],
8988 8955 rsp->fcp_u.fcp_status.scsi_status,
8989 8956 sense_to->es_key, sense_to->es_add_code,
8990 8957 sense_to->es_qual_code);
8991 8958 }
8992 8959 } else {
8993 8960 plun = ADDR2LUN(&pkt->pkt_address);
8994 8961 ptgt = plun->lun_tgt;
8995 8962 ASSERT(ptgt != NULL);
8996 8963
8997 8964 /*
8998 8965 * Work harder to translate errors into target driver
8999 8966 * understandable ones. Note with despair that the target
9000 8967 * drivers don't decode pkt_state and pkt_reason exhaustively.
9001 8968 * They resort to using the big hammer most often, which
9002 8969 * may not get fixed in the life time of this driver.
9003 8970 */
9004 8971 pkt->pkt_state = 0;
9005 8972 pkt->pkt_statistics = 0;
9006 8973
9007 8974 switch (fpkt->pkt_state) {
9008 8975 case FC_PKT_TRAN_ERROR:
9009 8976 switch (fpkt->pkt_reason) {
9010 8977 case FC_REASON_OVERRUN:
9011 8978 pkt->pkt_reason = CMD_CMD_OVR;
9012 8979 pkt->pkt_statistics |= STAT_ABORTED;
9013 8980 break;
9014 8981
9015 8982 case FC_REASON_XCHG_BSY: {
9016 8983 caddr_t ptr;
9017 8984
9018 8985 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9019 8986
9020 8987 ptr = (caddr_t)pkt->pkt_scbp;
9021 8988 if (ptr) {
9022 8989 *ptr = STATUS_BUSY;
9023 8990 }
9024 8991 break;
9025 8992 }
9026 8993
9027 8994 case FC_REASON_ABORTED:
9028 8995 pkt->pkt_reason = CMD_TRAN_ERR;
9029 8996 pkt->pkt_statistics |= STAT_ABORTED;
9030 8997 break;
9031 8998
9032 8999 case FC_REASON_ABORT_FAILED:
9033 9000 pkt->pkt_reason = CMD_ABORT_FAIL;
9034 9001 break;
9035 9002
9036 9003 case FC_REASON_NO_SEQ_INIT:
9037 9004 case FC_REASON_CRC_ERROR:
9038 9005 pkt->pkt_reason = CMD_TRAN_ERR;
9039 9006 pkt->pkt_statistics |= STAT_ABORTED;
9040 9007 break;
9041 9008 default:
9042 9009 pkt->pkt_reason = CMD_TRAN_ERR;
9043 9010 break;
9044 9011 }
9045 9012 break;
9046 9013
9047 9014 case FC_PKT_PORT_OFFLINE: {
9048 9015 dev_info_t *cdip = NULL;
9049 9016 caddr_t ptr;
9050 9017
9051 9018 if (fpkt->pkt_reason == FC_REASON_LOGIN_REQUIRED) {
9052 9019 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9053 9020 fcp_trace, FCP_BUF_LEVEL_8, 0,
9054 9021 "SCSI cmd; LOGIN REQUIRED from FCA for %x",
9055 9022 ptgt->tgt_d_id);
9056 9023 }
9057 9024
9058 9025 mutex_enter(&plun->lun_mutex);
9059 9026 if (plun->lun_mpxio == 0) {
9060 9027 cdip = DIP(plun->lun_cip);
9061 9028 } else if (plun->lun_cip) {
9062 9029 cdip = mdi_pi_get_client(PIP(plun->lun_cip));
9063 9030 }
9064 9031
9065 9032 mutex_exit(&plun->lun_mutex);
9066 9033
9067 9034 if (cdip) {
9068 9035 (void) ndi_event_retrieve_cookie(
9069 9036 pptr->port_ndi_event_hdl, cdip,
9070 9037 FCAL_REMOVE_EVENT, &fcp_remove_eid,
9071 9038 NDI_EVENT_NOPASS);
9072 9039 (void) ndi_event_run_callbacks(
9073 9040 pptr->port_ndi_event_hdl, cdip,
9074 9041 fcp_remove_eid, NULL);
9075 9042 }
9076 9043
9077 9044 /*
9078 9045 * If the link goes off-line for a lip,
9079 9046 * this will cause an error to the ST SG
9080 9047 * SGEN drivers. By setting BUSY we will
9081 9048 * give the drivers the chance to retry
9082 9049 * before it blows off the job. ST will
9083 9050 * remember how many times it has retried.
9084 9051 */
9085 9052 */
9086 9053 if ((plun->lun_type == DTYPE_SEQUENTIAL) ||
9087 9054 (plun->lun_type == DTYPE_CHANGER)) {
9088 9055 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9089 9056 ptr = (caddr_t)pkt->pkt_scbp;
9090 9057 if (ptr) {
9091 9058 *ptr = STATUS_BUSY;
9092 9059 }
9093 9060 } else {
9094 9061 pkt->pkt_reason = CMD_TRAN_ERR;
9095 9062 pkt->pkt_statistics |= STAT_BUS_RESET;
9096 9063 }
9097 9064 break;
9098 9065 }
9099 9066
9100 9067 case FC_PKT_TRAN_BSY:
9101 9068 /*
9102 9069 * Use the ssd Qfull handling here.
9103 9070 */
9104 9071 *pkt->pkt_scbp = STATUS_INTERMEDIATE;
9105 9072 pkt->pkt_state = STATE_GOT_BUS;
9106 9073 break;
9107 9074
9108 9075 case FC_PKT_TIMEOUT:
9109 9076 pkt->pkt_reason = CMD_TIMEOUT;
9110 9077 if (fpkt->pkt_reason == FC_REASON_ABORT_FAILED) {
9111 9078 pkt->pkt_statistics |= STAT_TIMEOUT;
9112 9079 } else {
9113 9080 pkt->pkt_statistics |= STAT_ABORTED;
9114 9081 }
9115 9082 break;
9116 9083
9117 9084 case FC_PKT_LOCAL_RJT:
9118 9085 switch (fpkt->pkt_reason) {
9119 9086 case FC_REASON_OFFLINE: {
9120 9087 dev_info_t *cdip = NULL;
9121 9088
9122 9089 mutex_enter(&plun->lun_mutex);
9123 9090 if (plun->lun_mpxio == 0) {
9124 9091 cdip = DIP(plun->lun_cip);
9125 9092 } else if (plun->lun_cip) {
9126 9093 cdip = mdi_pi_get_client(
9127 9094 PIP(plun->lun_cip));
9128 9095 }
9129 9096 mutex_exit(&plun->lun_mutex);
9130 9097
9131 9098 if (cdip) {
9132 9099 (void) ndi_event_retrieve_cookie(
9133 9100 pptr->port_ndi_event_hdl, cdip,
9134 9101 FCAL_REMOVE_EVENT,
9135 9102 &fcp_remove_eid,
9136 9103 NDI_EVENT_NOPASS);
9137 9104 (void) ndi_event_run_callbacks(
9138 9105 pptr->port_ndi_event_hdl,
9139 9106 cdip, fcp_remove_eid, NULL);
9140 9107 }
9141 9108
9142 9109 pkt->pkt_reason = CMD_TRAN_ERR;
9143 9110 pkt->pkt_statistics |= STAT_BUS_RESET;
9144 9111
9145 9112 break;
9146 9113 }
9147 9114
9148 9115 case FC_REASON_NOMEM:
9149 9116 case FC_REASON_QFULL: {
9150 9117 caddr_t ptr;
9151 9118
9152 9119 pkt->pkt_reason = CMD_CMPLT; /* Lie */
9153 9120 ptr = (caddr_t)pkt->pkt_scbp;
9154 9121 if (ptr) {
9155 9122 *ptr = STATUS_BUSY;
9156 9123 }
9157 9124 break;
9158 9125 }
9159 9126
9160 9127 case FC_REASON_DMA_ERROR:
9161 9128 pkt->pkt_reason = CMD_DMA_DERR;
9162 9129 pkt->pkt_statistics |= STAT_ABORTED;
9163 9130 break;
9164 9131
9165 9132 case FC_REASON_CRC_ERROR:
9166 9133 case FC_REASON_UNDERRUN: {
9167 9134 uchar_t status;
9168 9135 /*
9169 9136 * Work around for Bugid: 4240945.
9170 9137 * IB on A5k doesn't set the Underrun bit
9171 9138 * in the fcp status, when it is transferring
9172 9139 * less than requested amount of data. Work
9173 9140 * around the ses problem to keep luxadm
9174 9141 * happy till ibfirmware is fixed.
9175 9142 */
9176 9143 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
9177 9144 FCP_CP_IN(fpkt->pkt_resp, rsp,
9178 9145 fpkt->pkt_resp_acc,
9179 9146 sizeof (struct fcp_rsp));
9180 9147 }
9181 9148 status = rsp->fcp_u.fcp_status.scsi_status;
9182 9149 if (((plun->lun_type & DTYPE_MASK) ==
9183 9150 DTYPE_ESI) && (status == STATUS_GOOD)) {
9184 9151 pkt->pkt_reason = CMD_CMPLT;
9185 9152 *pkt->pkt_scbp = status;
9186 9153 pkt->pkt_resid = 0;
9187 9154 } else {
9188 9155 pkt->pkt_reason = CMD_TRAN_ERR;
9189 9156 pkt->pkt_statistics |= STAT_ABORTED;
9190 9157 }
9191 9158 break;
9192 9159 }
9193 9160
9194 9161 case FC_REASON_NO_CONNECTION:
9195 9162 case FC_REASON_UNSUPPORTED:
9196 9163 case FC_REASON_ILLEGAL_REQ:
9197 9164 case FC_REASON_BAD_SID:
9198 9165 case FC_REASON_DIAG_BUSY:
9199 9166 case FC_REASON_FCAL_OPN_FAIL:
9200 9167 case FC_REASON_BAD_XID:
9201 9168 default:
9202 9169 pkt->pkt_reason = CMD_TRAN_ERR;
9203 9170 pkt->pkt_statistics |= STAT_ABORTED;
9204 9171 break;
9205 9172
9206 9173 }
9207 9174 break;
9208 9175
9209 9176 case FC_PKT_NPORT_RJT:
9210 9177 case FC_PKT_FABRIC_RJT:
9211 9178 case FC_PKT_NPORT_BSY:
9212 9179 case FC_PKT_FABRIC_BSY:
9213 9180 default:
9214 9181 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9215 9182 fcp_trace, FCP_BUF_LEVEL_8, 0,
9216 9183 "FC Status 0x%x, reason 0x%x",
9217 9184 fpkt->pkt_state, fpkt->pkt_reason);
9218 9185 pkt->pkt_reason = CMD_TRAN_ERR;
9219 9186 pkt->pkt_statistics |= STAT_ABORTED;
9220 9187 break;
9221 9188 }
9222 9189
9223 9190 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9224 9191 fcp_trace, FCP_BUF_LEVEL_9, 0,
9225 9192 "!FC error on cmd=%p target=0x%x: pkt state=0x%x "
9226 9193 " pkt reason=0x%x", cmd, ptgt->tgt_d_id, fpkt->pkt_state,
9227 9194 fpkt->pkt_reason);
9228 9195 }
9229 9196
9230 9197 ASSERT(save.a_hba_tran == pkt->pkt_address.a_hba_tran);
9231 9198 }
9232 9199
9233 9200
9234 9201 static int
9235 9202 fcp_validate_fcp_response(struct fcp_rsp *rsp, struct fcp_port *pptr)
9236 9203 {
9237 9204 if (rsp->reserved_0 || rsp->reserved_1 ||
9238 9205 rsp->fcp_u.fcp_status.reserved_0 ||
9239 9206 rsp->fcp_u.fcp_status.reserved_1) {
9240 9207 /*
9241 9208 * These reserved fields should ideally be zero. FCP-2 does say
9242 9209 * that the recipient need not check for reserved fields to be
9243 9210 * zero. If they are not zero, we will not make a fuss about it
9244 9211 * - just log it (in debug to both trace buffer and messages
9245 9212 * file and to trace buffer only in non-debug) and move on.
9246 9213 *
9247 9214 * Non-zero reserved fields were seen with minnows.
9248 9215 *
9249 9216 * qlc takes care of some of this but we cannot assume that all
9250 9217 * FCAs will do so.
9251 9218 */
9252 9219 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
9253 9220 FCP_BUF_LEVEL_5, 0,
9254 9221 "Got fcp response packet with non-zero reserved fields "
9255 9222 "rsp->reserved_0:0x%x, rsp_reserved_1:0x%x, "
9256 9223 "status.reserved_0:0x%x, status.reserved_1:0x%x",
9257 9224 rsp->reserved_0, rsp->reserved_1,
9258 9225 rsp->fcp_u.fcp_status.reserved_0,
9259 9226 rsp->fcp_u.fcp_status.reserved_1);
9260 9227 }
9261 9228
9262 9229 if (rsp->fcp_u.fcp_status.rsp_len_set && (rsp->fcp_response_len >
9263 9230 (FCP_MAX_RSP_IU_SIZE - sizeof (struct fcp_rsp)))) {
9264 9231 return (FC_FAILURE);
9265 9232 }
9266 9233
9267 9234 if (rsp->fcp_u.fcp_status.sense_len_set && rsp->fcp_sense_len >
9268 9235 (FCP_MAX_RSP_IU_SIZE - rsp->fcp_response_len -
9269 9236 sizeof (struct fcp_rsp))) {
9270 9237 return (FC_FAILURE);
9271 9238 }
9272 9239
9273 9240 return (FC_SUCCESS);
9274 9241 }
9275 9242
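
As an aside, the bounds enforced above assume the usual layout of the FCP response IU: the fixed fcp_rsp header, followed by fcp_response_len bytes of response info, followed by fcp_sense_len bytes of sense data, all of which must fit within FCP_MAX_RSP_IU_SIZE. The short sketch below is illustrative only (the helper name is hypothetical); it simply restates the pointer arithmetic that fcp_complete_pkt() uses to locate the sense data.

/*
 * Illustrative sketch (not part of the driver): locate the sense data
 * inside a response IU laid out as
 *   [ struct fcp_rsp | rsp_info (fcp_response_len) | sense (fcp_sense_len) ]
 * This mirrors the sense_from computation in fcp_complete_pkt().
 */
static caddr_t
fcp_sense_offset_sketch(caddr_t resp, struct fcp_rsp *rsp)
{
	return (resp + sizeof (struct fcp_rsp) + rsp->fcp_response_len);
}
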
9276 9243
9277 9244 /*
9278 9245 * This is called when there is a change in the device state. The case we're
9279 9246 * handling here is: if the d_id does not match, offline this tgt and online
9280 9247 * a new tgt with the new d_id. called from fcp_handle_devices with
9281 9248 * port_mutex held.
9282 9249 */
9283 9250 static int
9284 9251 fcp_device_changed(struct fcp_port *pptr, struct fcp_tgt *ptgt,
9285 9252 fc_portmap_t *map_entry, int link_cnt, int tgt_cnt, int cause)
9286 9253 {
9287 9254 ASSERT(mutex_owned(&pptr->port_mutex));
9288 9255
9289 9256 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9290 9257 fcp_trace, FCP_BUF_LEVEL_3, 0,
9291 9258 "Starting fcp_device_changed...");
9292 9259
9293 9260 /*
9294 9261 * The two cases where the port_device_changed is called are
9295 9262 * when it changes its d_id or its hard address.
9296 9263 */
9297 9264 if ((ptgt->tgt_d_id != map_entry->map_did.port_id) ||
9298 9265 (FC_TOP_EXTERNAL(pptr->port_topology) &&
9299 9266 (ptgt->tgt_hard_addr != map_entry->map_hard_addr.hard_addr))) {
9300 9267
9301 9268 /* offline this target */
9302 9269 mutex_enter(&ptgt->tgt_mutex);
9303 9270 if (!(ptgt->tgt_state & FCP_TGT_OFFLINE)) {
9304 9271 (void) fcp_offline_target(pptr, ptgt, link_cnt,
9305 9272 0, 1, NDI_DEVI_REMOVE);
9306 9273 }
9307 9274 mutex_exit(&ptgt->tgt_mutex);
9308 9275
9309 9276 fcp_log(CE_NOTE, pptr->port_dip,
9310 9277 "Change in target properties: Old D_ID=%x New D_ID=%x"
9311 9278 " Old HA=%x New HA=%x", ptgt->tgt_d_id,
9312 9279 map_entry->map_did.port_id, ptgt->tgt_hard_addr,
9313 9280 map_entry->map_hard_addr.hard_addr);
9314 9281 }
9315 9282
9316 9283 return (fcp_handle_mapflags(pptr, ptgt, map_entry,
9317 9284 link_cnt, tgt_cnt, cause));
9318 9285 }
9319 9286
9320 9287 /*
9321 9288 * Function: fcp_alloc_lun
9322 9289 *
9323 9290 * Description: Creates a new lun structure and adds it to the list
9324 9291 * of luns of the target.
9325 9292 *
9326 9293 * Argument: ptgt Target the lun will belong to.
9327 9294 *
9328 9295 * Return Value: NULL Failed
9329 9296 * Not NULL Succeeded
9330 9297 *
9331 9298 * Context: Kernel context
9332 9299 */
9333 9300 static struct fcp_lun *
9334 9301 fcp_alloc_lun(struct fcp_tgt *ptgt)
9335 9302 {
9336 9303 struct fcp_lun *plun;
9337 9304
9338 9305 plun = kmem_zalloc(sizeof (struct fcp_lun), KM_NOSLEEP);
9339 9306 if (plun != NULL) {
9340 9307 /*
9341 9308 * Initialize the mutex before putting in the target list
9342 9309 * especially before releasing the target mutex.
9343 9310 */
9344 9311 mutex_init(&plun->lun_mutex, NULL, MUTEX_DRIVER, NULL);
9345 9312 plun->lun_tgt = ptgt;
9346 9313
9347 9314 mutex_enter(&ptgt->tgt_mutex);
9348 9315 plun->lun_next = ptgt->tgt_lun;
9349 9316 ptgt->tgt_lun = plun;
9350 9317 plun->lun_old_guid = NULL;
9351 9318 plun->lun_old_guid_size = 0;
9352 9319 mutex_exit(&ptgt->tgt_mutex);
9353 9320 }
9354 9321
9355 9322 return (plun);
9356 9323 }
9357 9324
9358 9325 /*
9359 9326 * Function: fcp_dealloc_lun
9360 9327 *
9361 9328 * Description: Frees the LUN structure passed by the caller.
9362 9329 *
9363 9330 * Argument: plun LUN structure to free.
9364 9331 *
9365 9332 * Return Value: None
9366 9333 *
9367 9334 * Context: Kernel context.
9368 9335 */
9369 9336 static void
9370 9337 fcp_dealloc_lun(struct fcp_lun *plun)
9371 9338 {
9372 9339 mutex_enter(&plun->lun_mutex);
9373 9340 if (plun->lun_cip) {
9374 9341 fcp_remove_child(plun);
9375 9342 }
9376 9343 mutex_exit(&plun->lun_mutex);
9377 9344
9378 9345 mutex_destroy(&plun->lun_mutex);
9379 9346 if (plun->lun_guid) {
9380 9347 kmem_free(plun->lun_guid, plun->lun_guid_size);
9381 9348 }
9382 9349 if (plun->lun_old_guid) {
9383 9350 kmem_free(plun->lun_old_guid, plun->lun_old_guid_size);
9384 9351 }
9385 9352 kmem_free(plun, sizeof (*plun));
9386 9353 }
9387 9354
9388 9355 /*
9389 9356 * Function: fcp_alloc_tgt
9390 9357 *
9391 9358 * Description: Creates a new target structure and adds it to the port
9392 9359 * hash list.
9393 9360 *
9394 9361 * Argument: pptr fcp port structure
9395 9362 * *map_entry entry describing the target to create
9396 9363 * link_cnt Link state change counter
9397 9364 *
9398 9365 * Return Value: NULL Failed
9399 9366 * Not NULL Succeeded
9400 9367 *
9401 9368 * Context: Kernel context.
9402 9369 */
9403 9370 static struct fcp_tgt *
9404 9371 fcp_alloc_tgt(struct fcp_port *pptr, fc_portmap_t *map_entry, int link_cnt)
9405 9372 {
9406 9373 int hash;
9407 9374 uchar_t *wwn;
9408 9375 struct fcp_tgt *ptgt;
9409 9376
9410 9377 ptgt = kmem_zalloc(sizeof (*ptgt), KM_NOSLEEP);
9411 9378 if (ptgt != NULL) {
9412 9379 mutex_enter(&pptr->port_mutex);
9413 9380 if (link_cnt != pptr->port_link_cnt) {
9414 9381 /*
9415 9382 * oh oh -- another link reset
9416 9383 * in progress -- give up
9417 9384 */
9418 9385 mutex_exit(&pptr->port_mutex);
9419 9386 kmem_free(ptgt, sizeof (*ptgt));
9420 9387 ptgt = NULL;
9421 9388 } else {
9422 9389 /*
9423 9390 * initialize the mutex before putting in the port
9424 9391 * wwn list, especially before releasing the port
9425 9392 * mutex.
9426 9393 */
9427 9394 mutex_init(&ptgt->tgt_mutex, NULL, MUTEX_DRIVER, NULL);
9428 9395
9429 9396 /* add new target entry to the port's hash list */
9430 9397 wwn = (uchar_t *)&map_entry->map_pwwn;
9431 9398 hash = FCP_HASH(wwn);
9432 9399
9433 9400 ptgt->tgt_next = pptr->port_tgt_hash_table[hash];
9434 9401 pptr->port_tgt_hash_table[hash] = ptgt;
9435 9402
9436 9403 /* save cross-ptr */
9437 9404 ptgt->tgt_port = pptr;
9438 9405
9439 9406 ptgt->tgt_change_cnt = 1;
9440 9407
9441 9408 /* initialize the target manual_config_only flag */
9442 9409 if (fcp_enable_auto_configuration) {
9443 9410 ptgt->tgt_manual_config_only = 0;
9444 9411 } else {
9445 9412 ptgt->tgt_manual_config_only = 1;
9446 9413 }
9447 9414
9448 9415 mutex_exit(&pptr->port_mutex);
9449 9416 }
9450 9417 }
9451 9418
9452 9419 return (ptgt);
9453 9420 }
9454 9421
9455 9422 /*
9456 9423 * Function: fcp_dealloc_tgt
9457 9424 *
9458 9425 * Description: Frees the target structure passed by the caller.
9459 9426 *
9460 9427 * Argument: ptgt Target structure to free.
9461 9428 *
9462 9429 * Return Value: None
9463 9430 *
9464 9431 * Context: Kernel context.
9465 9432 */
9466 9433 static void
9467 9434 fcp_dealloc_tgt(struct fcp_tgt *ptgt)
9468 9435 {
9469 9436 mutex_destroy(&ptgt->tgt_mutex);
9470 9437 kmem_free(ptgt, sizeof (*ptgt));
9471 9438 }
9472 9439
9473 9440
9474 9441 /*
9475 9442 * Handle STATUS_QFULL and STATUS_BUSY by performing delayed retry
9476 9443 *
9477 9444 * Device discovery commands will not be retried forever as
9478 9445 * this will have repercussions on other devices that need to
9479 9446 * be submitted to the hotplug thread. After a quick glance
9480 9447 * at the SCSI-3 spec, it was found that the spec doesn't
9481 9448 * mandate a forever retry, rather recommends a delayed retry.
9482 9449 *
9483 9450 * Since Photon IB is single threaded, STATUS_BUSY is common
9484 9451 * in a 4+initiator environment. Make sure the total time
9485 9452 * spent on retries (including command timeout) does not
9486 9453 * spent on retries (including command timeout) does not exceed
9487 9454 */
9488 9455 static void
9489 9456 fcp_queue_ipkt(struct fcp_port *pptr, fc_packet_t *fpkt)
9490 9457 {
9491 9458 struct fcp_ipkt *icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9492 9459 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
9493 9460
9494 9461 mutex_enter(&pptr->port_mutex);
9495 9462 mutex_enter(&ptgt->tgt_mutex);
9496 9463 if (FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
9497 9464 FCP_TRACE(fcp_logq, pptr->port_instbuf,
9498 9465 fcp_trace, FCP_BUF_LEVEL_2, 0,
9499 9466 "fcp_queue_ipkt,1:state change occured"
9500 9467 " for D_ID=0x%x", ptgt->tgt_d_id);
9501 9468 mutex_exit(&ptgt->tgt_mutex);
9502 9469 mutex_exit(&pptr->port_mutex);
9503 9470 (void) fcp_call_finish_init(pptr, ptgt, icmd->ipkt_link_cnt,
9504 9471 icmd->ipkt_change_cnt, icmd->ipkt_cause);
9505 9472 fcp_icmd_free(pptr, icmd);
9506 9473 return;
9507 9474 }
9508 9475 mutex_exit(&ptgt->tgt_mutex);
9509 9476
9510 9477 icmd->ipkt_restart = fcp_watchdog_time + icmd->ipkt_retries++;
9511 9478
9512 9479 if (pptr->port_ipkt_list != NULL) {
9513 9480 /* add pkt to front of doubly-linked list */
9514 9481 pptr->port_ipkt_list->ipkt_prev = icmd;
9515 9482 icmd->ipkt_next = pptr->port_ipkt_list;
9516 9483 pptr->port_ipkt_list = icmd;
9517 9484 icmd->ipkt_prev = NULL;
9518 9485 } else {
9519 9486 /* this is the first/only pkt on the list */
9520 9487 pptr->port_ipkt_list = icmd;
9521 9488 icmd->ipkt_next = NULL;
9522 9489 icmd->ipkt_prev = NULL;
9523 9490 }
9524 9491 mutex_exit(&pptr->port_mutex);
9525 9492 }
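
For context, a deferred packet parked on port_ipkt_list above is only useful if something later walks the list once fcp_watchdog_time reaches ipkt_restart. The sketch below is a simplified, hypothetical drain loop; it is not the driver's actual watch-thread code, the re-issue step is deliberately left as a comment, and it relies only on the fields used in fcp_queue_ipkt() above.

/*
 * Illustrative sketch (not the driver's fcp_watch() logic): unlink
 * deferred internal packets whose restart time has elapsed.
 */
static void
fcp_drain_ipkt_list_sketch(struct fcp_port *pptr)
{
	struct fcp_ipkt *icmd, *next;

	mutex_enter(&pptr->port_mutex);
	for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = next) {
		next = icmd->ipkt_next;
		if (icmd->ipkt_restart > fcp_watchdog_time) {
			continue;	/* not due yet */
		}
		/* unlink from the doubly-linked list */
		if (icmd->ipkt_prev != NULL) {
			icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
		} else {
			pptr->port_ipkt_list = icmd->ipkt_next;
		}
		if (icmd->ipkt_next != NULL) {
			icmd->ipkt_next->ipkt_prev = icmd->ipkt_prev;
		}
		/* the packet would be re-issued (or failed) here */
	}
	mutex_exit(&pptr->port_mutex);
}
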
9526 9493
9527 9494 /*
9528 9495 * Function: fcp_transport
9529 9496 *
9530 9497 * Description: This function submits the Fibre Channel packet to the transport
9531 9498 * layer by calling fc_ulp_transport(). If fc_ulp_transport()
9532 9499 * fails the submission, the treatment depends on the value of
9533 9500 * the variable internal.
9534 9501 *
9535 9502 * Argument: port_handle fp/fctl port handle.
9536 9503 * *fpkt Packet to submit to the transport layer.
9537 9504 * internal Not zero when it's an internal packet.
9538 9505 *
9539 9506 * Return Value: FC_TRAN_BUSY
9540 9507 * FC_STATEC_BUSY
9541 9508 * FC_OFFLINE
9542 9509 * FC_LOGINREQ
9543 9510 * FC_DEVICE_BUSY
9544 9511 * FC_SUCCESS
9545 9512 */
9546 9513 static int
9547 9514 fcp_transport(opaque_t port_handle, fc_packet_t *fpkt, int internal)
9548 9515 {
9549 9516 int rval;
9550 9517
9551 9518 rval = fc_ulp_transport(port_handle, fpkt);
9552 9519 if (rval == FC_SUCCESS) {
9553 9520 return (rval);
9554 9521 }
9555 9522
9556 9523 /*
9557 9524 * The LUN isn't marked BUSY or OFFLINE, so we got here to transport
9558 9525 * a command. If the underlying modules see that there is a state
9559 9526 * change, or if a port is OFFLINE, that means the state change
9560 9527 * hasn't reached FCP yet, so re-queue the command for deferred
9561 9528 * submission.
9562 9529 */
9563 9530 if ((rval == FC_STATEC_BUSY) || (rval == FC_OFFLINE) ||
9564 9531 (rval == FC_LOGINREQ) || (rval == FC_DEVICE_BUSY) ||
9565 9532 (rval == FC_DEVICE_BUSY_NEW_RSCN) || (rval == FC_TRAN_BUSY)) {
9566 9533 /*
9567 9534 * Defer packet re-submission. Life hang is possible on
9568 9535 * internal commands if the port driver sends FC_STATEC_BUSY
9569 9536 * forever, but that shouldn't happen in a good environment.
9570 9537 * Limiting re-transport for internal commands is probably a
9571 9538 * good idea.
9572 9539 * A race condition can happen when a port sees a barrage of
9573 9540 * link transitions offline to online. If the FCTL has
9574 9541 * returned FC_STATEC_BUSY or FC_OFFLINE then none of the
9575 9542 * internal commands should be queued to do the discovery.
9576 9543 * The race condition is when an online comes and FCP starts
9577 9544 * its internal discovery and the link goes offline. It is
9578 9545 * possible that the statec_callback has not reached FCP
9579 9546 * and FCP is carrying on with its internal discovery.
9580 9547 * FC_STATEC_BUSY or FC_OFFLINE will be the first indication
9581 9548 * that the link has gone offline. At this point FCP should
9582 9549 * drop all the internal commands and wait for the
9583 9550 * statec_callback. It will be facilitated by incrementing
9584 9551 * port_link_cnt.
9585 9552 *
9586 9553 * For external commands, the (FC)pkt_timeout is decremented
9587 9554 * by the QUEUE Delay added by our driver. Care is taken to
9588 9555 * ensure that it doesn't become zero (zero means no timeout).
9589 9556 * If the time expires right inside the driver queue itself,
9590 9557 * the watch thread will return it to the original caller
9591 9558 * indicating that the command has timed-out.
9592 9559 */
9593 9560 if (internal) {
9594 9561 char *op;
9595 9562 struct fcp_ipkt *icmd;
9596 9563
9597 9564 icmd = (struct fcp_ipkt *)fpkt->pkt_ulp_private;
9598 9565 switch (icmd->ipkt_opcode) {
9599 9566 case SCMD_REPORT_LUN:
9600 9567 op = "REPORT LUN";
9601 9568 break;
9602 9569
9603 9570 case SCMD_INQUIRY:
9604 9571 op = "INQUIRY";
9605 9572 break;
9606 9573
9607 9574 case SCMD_INQUIRY_PAGE83:
9608 9575 op = "INQUIRY-83";
9609 9576 break;
9610 9577
9611 9578 default:
9612 9579 op = "Internal SCSI COMMAND";
9613 9580 break;
9614 9581 }
9615 9582
9616 9583 if (fcp_handle_ipkt_errors(icmd->ipkt_port,
9617 9584 icmd->ipkt_tgt, icmd, rval, op) == DDI_SUCCESS) {
9618 9585 rval = FC_SUCCESS;
9619 9586 }
9620 9587 } else {
9621 9588 struct fcp_pkt *cmd;
9622 9589 struct fcp_port *pptr;
9623 9590
9624 9591 cmd = (struct fcp_pkt *)fpkt->pkt_ulp_private;
9625 9592 cmd->cmd_state = FCP_PKT_IDLE;
9626 9593 pptr = ADDR2FCP(&cmd->cmd_pkt->pkt_address);
9627 9594
9628 9595 if (cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) {
9629 9596 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
9630 9597 fcp_trace, FCP_BUF_LEVEL_9, 0,
9631 9598 "fcp_transport: xport busy for pkt %p",
9632 9599 cmd->cmd_pkt);
9633 9600 rval = FC_TRAN_BUSY;
9634 9601 } else {
9635 9602 fcp_queue_pkt(pptr, cmd);
9636 9603 rval = FC_SUCCESS;
9637 9604 }
9638 9605 }
9639 9606 }
9640 9607
9641 9608 return (rval);
9642 9609 }
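
As a rough usage illustration only, a tran_start-style caller could fold fcp_transport()'s return values into the generic SCSI transport codes along these lines; the helper name and the exact mapping are hypothetical and do not reproduce the driver's real fcp_scsi_start() logic.

/*
 * Hypothetical caller sketch: FC_SUCCESS covers both an immediate
 * submission and a command queued above for deferred retry.
 */
static int
fcp_start_sketch(struct fcp_port *pptr, fc_packet_t *fpkt)
{
	switch (fcp_transport(pptr->port_fp_handle, fpkt, 0)) {
	case FC_SUCCESS:
		return (TRAN_ACCEPT);
	case FC_TRAN_BUSY:
	case FC_DEVICE_BUSY:
	case FC_STATEC_BUSY:
		return (TRAN_BUSY);
	default:
		return (TRAN_FATAL_ERROR);
	}
}
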
9643 9610
9644 9611 /*VARARGS3*/
9645 9612 static void
9646 9613 fcp_log(int level, dev_info_t *dip, const char *fmt, ...)
9647 9614 {
9648 9615 char buf[256];
9649 9616 va_list ap;
9650 9617
9651 9618 if (dip == NULL) {
9652 9619 dip = fcp_global_dip;
9653 9620 }
9654 9621
9655 9622 va_start(ap, fmt);
9656 9623 (void) vsprintf(buf, fmt, ap);
9657 9624 va_end(ap);
9658 9625
9659 9626 scsi_log(dip, "fcp", level, buf);
9660 9627 }
9661 9628
9662 9629 /*
9663 9630 * This function retries NS registry of FC4 type.
9664 9631 * It assumes that fcp_mutex is held.
9665 9632 * The function does nothing if the topology is not fabric.
9666 9633 * So, the topology has to be set before this function can be called.
9667 9634 */
9668 9635 static void
9669 9636 fcp_retry_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9670 9637 {
9671 9638 int rval;
9672 9639
9673 9640 ASSERT(MUTEX_HELD(&pptr->port_mutex));
9674 9641
9675 9642 if (((pptr->port_state & FCP_STATE_NS_REG_FAILED) == 0) ||
9676 9643 ((pptr->port_topology != FC_TOP_FABRIC) &&
9677 9644 (pptr->port_topology != FC_TOP_PUBLIC_LOOP))) {
9678 9645 if (pptr->port_state & FCP_STATE_NS_REG_FAILED) {
9679 9646 pptr->port_state &= ~FCP_STATE_NS_REG_FAILED;
9680 9647 }
9681 9648 return;
9682 9649 }
9683 9650 mutex_exit(&pptr->port_mutex);
9684 9651 rval = fcp_do_ns_registry(pptr, s_id);
9685 9652 mutex_enter(&pptr->port_mutex);
9686 9653
9687 9654 if (rval == 0) {
9688 9655 /* Registry successful. Reset flag */
9689 9656 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
9690 9657 }
9691 9658 }
9692 9659
9693 9660 /*
9694 9661 * This function registers the ULP with the switch by calling transport i/f
9695 9662 */
9696 9663 static int
9697 9664 fcp_do_ns_registry(struct fcp_port *pptr, uint32_t s_id)
9698 9665 {
9699 9666 fc_ns_cmd_t ns_cmd;
9700 9667 ns_rfc_type_t rfc;
9701 9668 uint32_t types[8];
9702 9669
9703 9670 /*
9704 9671 * Prepare the Name server structure to
9705 9672 * register with the transport in case of
9706 9673 * Fabric configuration.
9707 9674 */
9708 9675 bzero(&rfc, sizeof (rfc));
9709 9676 bzero(types, sizeof (types));
9710 9677
9711 9678 types[FC4_TYPE_WORD_POS(FC_TYPE_SCSI_FCP)] =
9712 9679 (1 << FC4_TYPE_BIT_POS(FC_TYPE_SCSI_FCP));
9713 9680
9714 9681 rfc.rfc_port_id.port_id = s_id;
9715 9682 bcopy(types, rfc.rfc_types, sizeof (types));
9716 9683
9717 9684 ns_cmd.ns_flags = 0;
9718 9685 ns_cmd.ns_cmd = NS_RFT_ID;
9719 9686 ns_cmd.ns_req_len = sizeof (rfc);
9720 9687 ns_cmd.ns_req_payload = (caddr_t)&rfc;
9721 9688 ns_cmd.ns_resp_len = 0;
9722 9689 ns_cmd.ns_resp_payload = NULL;
9723 9690
9724 9691 /*
9725 9692 * Perform the Name Server Registration for SCSI_FCP FC4 Type.
9726 9693 */
9727 9694 if (fc_ulp_port_ns(pptr->port_fp_handle, NULL, &ns_cmd)) {
9728 9695 fcp_log(CE_WARN, pptr->port_dip,
9729 9696 "!ns_registry: failed name server registration");
9730 9697 return (1);
9731 9698 }
9732 9699
9733 9700 return (0);
9734 9701 }
9735 9702
9736 9703 /*
9737 9704 * Function: fcp_handle_port_attach
9738 9705 *
9739 9706 * Description: This function is called from fcp_port_attach() to attach a
9740 9707 * new port. This routine does the following:
9741 9708 *
9742 9709 * 1) Allocates an fcp_port structure and initializes it.
9743 9710 * 2) Tries to register the new FC-4 (FCP) capability with the name
9744 9711 * server.
9745 9712 * 3) Kicks off the enumeration of the targets/luns visible
9746 9713 * through this new port. That is done by calling
9747 9714 * fcp_statec_callback() if the port is online.
9748 9715 *
9749 9716 * Argument: ulph fp/fctl port handle.
9750 9717 * *pinfo Port information.
9751 9718 * s_id Port ID.
9752 9719 * instance Device instance number for the local port
9753 9720 * (returned by ddi_get_instance()).
9754 9721 *
9755 9722 * Return Value: DDI_SUCCESS
9756 9723 * DDI_FAILURE
9757 9724 *
9758 9725 * Context: User and Kernel context.
9759 9726 */
9760 9727 /*ARGSUSED*/
9761 9728 int
9762 9729 fcp_handle_port_attach(opaque_t ulph, fc_ulp_port_info_t *pinfo,
9763 9730 uint32_t s_id, int instance)
9764 9731 {
9765 9732 int res = DDI_FAILURE;
9766 9733 scsi_hba_tran_t *tran;
9767 9734 int mutex_initted = FALSE;
9768 9735 int hba_attached = FALSE;
9769 9736 int soft_state_linked = FALSE;
9770 9737 int event_bind = FALSE;
9771 9738 struct fcp_port *pptr;
9772 9739 fc_portmap_t *tmp_list = NULL;
9773 9740 uint32_t max_cnt, alloc_cnt;
9774 9741 uchar_t *boot_wwn = NULL;
9775 9742 uint_t nbytes;
9776 9743 int manual_cfg;
9777 9744
9778 9745 /*
9779 9746 * this port instance attaching for the first time (or after
9780 9747 * being detached before)
9781 9748 */
9782 9749 FCP_TRACE(fcp_logq, "fcp", fcp_trace,
9783 9750 FCP_BUF_LEVEL_3, 0, "port attach: for port %d", instance);
9784 9751
9785 9752 if (ddi_soft_state_zalloc(fcp_softstate, instance) != DDI_SUCCESS) {
9786 9753 cmn_err(CE_WARN, "fcp: Softstate struct alloc failed"
9787 9754 "parent dip: %p; instance: %d", (void *)pinfo->port_dip,
9788 9755 instance);
9789 9756 return (res);
9790 9757 }
9791 9758
9792 9759 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
9793 9760 /* this shouldn't happen */
9794 9761 ddi_soft_state_free(fcp_softstate, instance);
9795 9762 cmn_err(CE_WARN, "fcp: bad soft state");
9796 9763 return (res);
9797 9764 }
9798 9765
9799 9766 (void) sprintf(pptr->port_instbuf, "fcp(%d)", instance);
9800 9767
9801 9768 /*
9802 9769 * Make a copy of ulp_port_info as fctl allocates
9803 9770 * a temp struct.
9804 9771 */
9805 9772 (void) fcp_cp_pinfo(pptr, pinfo);
9806 9773
9807 9774 /*
9808 9775 * Check for manual_configuration_only property.
9809 9776 * Enable manual configuration if the property is
9810 9777 * set to 1, otherwise disable manual configuration.
9811 9778 */
9812 9779 if ((manual_cfg = ddi_prop_get_int(DDI_DEV_T_ANY, pptr->port_dip,
9813 9780 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS,
9814 9781 MANUAL_CFG_ONLY,
9815 9782 -1)) != -1) {
9816 9783 if (manual_cfg == 1) {
9817 9784 char *pathname;
9818 9785 pathname = kmem_zalloc(MAXPATHLEN, KM_SLEEP);
9819 9786 (void) ddi_pathname(pptr->port_dip, pathname);
9820 9787 cmn_err(CE_NOTE,
9821 9788 "%s (%s%d) %s is enabled via %s.conf.",
9822 9789 pathname,
9823 9790 ddi_driver_name(pptr->port_dip),
9824 9791 ddi_get_instance(pptr->port_dip),
9825 9792 MANUAL_CFG_ONLY,
9826 9793 ddi_driver_name(pptr->port_dip));
9827 9794 fcp_enable_auto_configuration = 0;
9828 9795 kmem_free(pathname, MAXPATHLEN);
9829 9796 }
9830 9797 }
9831 9798 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9832 9799 pptr->port_link_cnt = 1;
9833 9800 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_link_cnt));
9834 9801 pptr->port_id = s_id;
9835 9802 pptr->port_instance = instance;
9836 9803 _NOTE(NOW_INVISIBLE_TO_OTHER_THREADS(pptr->port_state));
9837 9804 pptr->port_state = FCP_STATE_INIT;
9838 9805 if (pinfo->port_acc_attr == NULL) {
9839 9806 /*
9840 9807 * The corresponding FCA doesn't support DMA at all
9841 9808 */
9842 9809 pptr->port_state |= FCP_STATE_FCA_IS_NODMA;
9843 9810 }
9844 9811
9845 9812 _NOTE(NOW_VISIBLE_TO_OTHER_THREADS(pptr->port_state));
9846 9813
9847 9814 if (!(pptr->port_state & FCP_STATE_FCA_IS_NODMA)) {
9848 9815 /*
9849 9816 * If the FCA supports DMA in the SCSI data phase, we need to preallocate
9850 9817 * the dma cookie, so stash the cookie size.
9851 9818 */
9852 9819 pptr->port_dmacookie_sz = sizeof (ddi_dma_cookie_t) *
9853 9820 pptr->port_data_dma_attr.dma_attr_sgllen;
9854 9821 }
9855 9822
9856 9823 /*
9857 9824 * The two mutexes of fcp_port are initialized. The variable
9858 9825 * mutex_initted is incremented to remember that fact. That variable
9859 9826 * is checked when the routine fails and the mutexes have to be
9860 9827 * destroyed.
9861 9828 */
9862 9829 mutex_init(&pptr->port_mutex, NULL, MUTEX_DRIVER, NULL);
9863 9830 mutex_init(&pptr->port_pkt_mutex, NULL, MUTEX_DRIVER, NULL);
9864 9831 mutex_initted++;
9865 9832
9866 9833 /*
9867 9834 * The SCSI tran structure is allocated and initialized now.
9868 9835 */
9869 9836 if ((tran = scsi_hba_tran_alloc(pptr->port_dip, 0)) == NULL) {
9870 9837 fcp_log(CE_WARN, pptr->port_dip,
9871 9838 "!fcp%d: scsi_hba_tran_alloc failed", instance);
9872 9839 goto fail;
9873 9840 }
9874 9841
9875 9842 /* link in the transport structure then fill it in */
9876 9843 pptr->port_tran = tran;
9877 9844 tran->tran_hba_private = pptr;
9878 9845 tran->tran_tgt_init = fcp_scsi_tgt_init;
9879 9846 tran->tran_tgt_probe = NULL;
9880 9847 tran->tran_tgt_free = fcp_scsi_tgt_free;
9881 9848 tran->tran_start = fcp_scsi_start;
9882 9849 tran->tran_reset = fcp_scsi_reset;
9883 9850 tran->tran_abort = fcp_scsi_abort;
9884 9851 tran->tran_getcap = fcp_scsi_getcap;
9885 9852 tran->tran_setcap = fcp_scsi_setcap;
9886 9853 tran->tran_init_pkt = NULL;
9887 9854 tran->tran_destroy_pkt = NULL;
9888 9855 tran->tran_dmafree = NULL;
9889 9856 tran->tran_sync_pkt = NULL;
9890 9857 tran->tran_reset_notify = fcp_scsi_reset_notify;
9891 9858 tran->tran_get_bus_addr = fcp_scsi_get_bus_addr;
9892 9859 tran->tran_get_name = fcp_scsi_get_name;
9893 9860 tran->tran_clear_aca = NULL;
9894 9861 tran->tran_clear_task_set = NULL;
9895 9862 tran->tran_terminate_task = NULL;
9896 9863 tran->tran_get_eventcookie = fcp_scsi_bus_get_eventcookie;
9897 9864 tran->tran_add_eventcall = fcp_scsi_bus_add_eventcall;
9898 9865 tran->tran_remove_eventcall = fcp_scsi_bus_remove_eventcall;
9899 9866 tran->tran_post_event = fcp_scsi_bus_post_event;
9900 9867 tran->tran_quiesce = NULL;
9901 9868 tran->tran_unquiesce = NULL;
9902 9869 tran->tran_bus_reset = NULL;
9903 9870 tran->tran_bus_config = fcp_scsi_bus_config;
9904 9871 tran->tran_bus_unconfig = fcp_scsi_bus_unconfig;
9905 9872 tran->tran_bus_power = NULL;
9906 9873 tran->tran_interconnect_type = INTERCONNECT_FABRIC;
9907 9874
9908 9875 tran->tran_pkt_constructor = fcp_kmem_cache_constructor;
9909 9876 tran->tran_pkt_destructor = fcp_kmem_cache_destructor;
9910 9877 tran->tran_setup_pkt = fcp_pkt_setup;
9911 9878 tran->tran_teardown_pkt = fcp_pkt_teardown;
9912 9879 tran->tran_hba_len = pptr->port_priv_pkt_len +
9913 9880 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz;
9914 9881 if (pptr->port_state & FCP_STATE_FCA_IS_NODMA) {
9915 9882 /*
9916 9883 * If the FCA doesn't support DMA, then we use different vectors to
9917 9884 * minimize the effects on DMA code flow path
9918 9885 */
9919 9886 tran->tran_start = fcp_pseudo_start;
9920 9887 tran->tran_init_pkt = fcp_pseudo_init_pkt;
9921 9888 tran->tran_destroy_pkt = fcp_pseudo_destroy_pkt;
9922 9889 tran->tran_sync_pkt = fcp_pseudo_sync_pkt;
9923 9890 tran->tran_dmafree = fcp_pseudo_dmafree;
9924 9891 tran->tran_setup_pkt = NULL;
9925 9892 tran->tran_teardown_pkt = NULL;
9926 9893 tran->tran_pkt_constructor = NULL;
9927 9894 tran->tran_pkt_destructor = NULL;
9928 9895 pptr->port_data_dma_attr = pseudo_fca_dma_attr;
9929 9896 }
9930 9897
9931 9898 /*
9932 9899 * Allocate an ndi event handle
9933 9900 */
9934 9901 pptr->port_ndi_event_defs = (ndi_event_definition_t *)
9935 9902 kmem_zalloc(sizeof (fcp_ndi_event_defs), KM_SLEEP);
9936 9903
9937 9904 bcopy(fcp_ndi_event_defs, pptr->port_ndi_event_defs,
9938 9905 sizeof (fcp_ndi_event_defs));
9939 9906
9940 9907 (void) ndi_event_alloc_hdl(pptr->port_dip, NULL,
9941 9908 &pptr->port_ndi_event_hdl, NDI_SLEEP);
9942 9909
9943 9910 pptr->port_ndi_events.ndi_events_version = NDI_EVENTS_REV1;
9944 9911 pptr->port_ndi_events.ndi_n_events = FCP_N_NDI_EVENTS;
9945 9912 pptr->port_ndi_events.ndi_event_defs = pptr->port_ndi_event_defs;
9946 9913
9947 9914 if (DEVI_IS_ATTACHING(pptr->port_dip) &&
9948 9915 (ndi_event_bind_set(pptr->port_ndi_event_hdl,
9949 9916 &pptr->port_ndi_events, NDI_SLEEP) != NDI_SUCCESS)) {
9950 9917 goto fail;
9951 9918 }
9952 9919 event_bind++; /* Checked in fail case */
9953 9920
9954 9921 if (scsi_hba_attach_setup(pptr->port_dip, &pptr->port_data_dma_attr,
9955 9922 tran, SCSI_HBA_ADDR_COMPLEX | SCSI_HBA_TRAN_SCB)
9956 9923 != DDI_SUCCESS) {
9957 9924 fcp_log(CE_WARN, pptr->port_dip,
9958 9925 "!fcp%d: scsi_hba_attach_setup failed", instance);
9959 9926 goto fail;
9960 9927 }
9961 9928 hba_attached++; /* Checked in fail case */
9962 9929
9963 9930 pptr->port_mpxio = 0;
9964 9931 if (mdi_phci_register(MDI_HCI_CLASS_SCSI, pptr->port_dip, 0) ==
9965 9932 MDI_SUCCESS) {
9966 9933 pptr->port_mpxio++;
9967 9934 }
9968 9935
9969 9936 /*
9970 9937 	 * The following code puts the new port structure on the global
9971 9938 	 * list of ports and, if it is the first port to attach, starts
9972 9939 	 * the fcp watchdog timeout.
9973 9940 	 *
9974 9941 	 * Why make the new port globally known before we are done
9975 9942 	 * attaching it?  Because of the code that follows: at this
9976 9943 	 * point the resources needed to handle the port have been
9977 9944 	 * allocated, and this function is now going to do the
9978 9945 	 * following:
9979 9946 	 *
9980 9947 	 * 1) It is going to try to register with the name server,
9981 9948 	 *    advertising the new FCP capability of the port.
9982 9949 	 * 2) It is going to play the role of the fp/fctl layer by
9983 9950 	 *    building a list of worldwide names reachable through this
9984 9951 	 *    port and calling itself on fcp_statec_callback().  That
9985 9952 	 *    requires the port to be part of the global list.
9986 9953 */
9987 9954 mutex_enter(&fcp_global_mutex);
9988 9955 if (fcp_port_head == NULL) {
9989 9956 fcp_read_blacklist(pinfo->port_dip, &fcp_lun_blacklist);
9990 9957 }
9991 9958 pptr->port_next = fcp_port_head;
9992 9959 fcp_port_head = pptr;
9993 9960 soft_state_linked++;
9994 9961
9995 9962 if (fcp_watchdog_init++ == 0) {
9996 9963 fcp_watchdog_tick = fcp_watchdog_timeout *
9997 9964 drv_usectohz(1000000);
9998 9965 fcp_watchdog_id = timeout(fcp_watch, NULL,
9999 9966 fcp_watchdog_tick);
10000 9967 }
10001 9968 mutex_exit(&fcp_global_mutex);
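	/*
	 * Editorial sketch (not part of the driver source): the watchdog
	 * interval used above is computed in clock ticks, with
	 * fcp_watchdog_timeout expressed in seconds:
	 *
	 *	fcp_watchdog_tick = fcp_watchdog_timeout *
	 *	    drv_usectohz(1000000);
	 *
	 * timeout(9F) then arms fcp_watch() with that interval; the
	 * matching untimeout(9F) runs once the last user lets go of it
	 * (see the fail: path below and fcp_handle_port_detach()).
	 */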
10002 9969
10003 9970 /*
10004 9971 	 * Here an attempt is made to register the new FCP capability
10005 9972 	 * with the name server.  That is done by sending an RFT_ID to
10006 9973 	 * the name server, synchronously: fcp_do_ns_registry() doesn't
10007 9974 	 * return until the name server has responded.
10008 9975 	 * On failure, just ignore it for now; it will get retried during
10009 9976 	 * state change callbacks.  We set a flag to record this failure.
10010 9977 */
10011 9978 if (fcp_do_ns_registry(pptr, s_id)) {
10012 9979 mutex_enter(&pptr->port_mutex);
10013 9980 pptr->port_state |= FCP_STATE_NS_REG_FAILED;
10014 9981 mutex_exit(&pptr->port_mutex);
10015 9982 } else {
10016 9983 mutex_enter(&pptr->port_mutex);
10017 9984 pptr->port_state &= ~(FCP_STATE_NS_REG_FAILED);
10018 9985 mutex_exit(&pptr->port_mutex);
10019 9986 }
10020 9987
10021 9988 /*
10022 9989 * Lookup for boot WWN property
10023 9990 */
10024 9991 if (modrootloaded != 1) {
10025 9992 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY,
10026 9993 ddi_get_parent(pinfo->port_dip),
10027 9994 DDI_PROP_DONTPASS, OBP_BOOT_WWN,
10028 9995 &boot_wwn, &nbytes) == DDI_PROP_SUCCESS) &&
10029 9996 (nbytes == FC_WWN_SIZE)) {
10030 9997 bcopy(boot_wwn, pptr->port_boot_wwn, FC_WWN_SIZE);
10031 9998 }
10032 9999 if (boot_wwn) {
10033 10000 ddi_prop_free(boot_wwn);
10034 10001 }
10035 10002 }
10036 10003
10037 10004 /*
10038 10005 * Handle various topologies and link states.
10039 10006 */
10040 10007 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
10041 10008 case FC_STATE_OFFLINE:
10042 10009
10043 10010 /*
10044 10011 * we're attaching a port where the link is offline
10045 10012 *
10046 10013 * Wait for ONLINE, at which time a state
10047 10014 * change will cause a statec_callback
10048 10015 *
10049 10016 		 * in the meantime, do not do anything
10050 10017 */
10051 10018 res = DDI_SUCCESS;
10052 10019 pptr->port_state |= FCP_STATE_OFFLINE;
10053 10020 break;
10054 10021
10055 10022 case FC_STATE_ONLINE: {
10056 10023 if (pptr->port_topology == FC_TOP_UNKNOWN) {
10057 10024 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
10058 10025 res = DDI_SUCCESS;
10059 10026 break;
10060 10027 }
10061 10028 /*
10062 10029 * discover devices and create nodes (a private
10063 10030 * loop or point-to-point)
10064 10031 */
10065 10032 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
10066 10033
10067 10034 /*
10068 10035 * At this point we are going to build a list of all the ports
10069 10036 		 * that can be reached through this local port.  No more than
10070 10037 		 * FCP_MAX_DEVICES (128) remote ports can be handled per
10071 10038 		 * local port.
10072 10039 */
10073 10040 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
10074 10041 sizeof (fc_portmap_t) * FCP_MAX_DEVICES,
10075 10042 KM_NOSLEEP)) == NULL) {
10076 10043 fcp_log(CE_WARN, pptr->port_dip,
10077 10044 "!fcp%d: failed to allocate portmap",
10078 10045 instance);
10079 10046 goto fail;
10080 10047 }
10081 10048
10082 10049 /*
10083 10050 		 * fc_ulp_getportmap() is going to provide us with the list
10084 10051 		 * of remote ports in the buffer we just allocated.  The way
10085 10052 		 * the list is retrieved depends on the topology.  However,
10086 10053 		 * if we are connected to a fabric, a name server request may
10087 10054 		 * be sent to get the list of FCP capable ports.  It should
10088 10055 		 * be noted that in that case the request is synchronous.
10089 10056 		 * This means we are stuck here until the name server replies.
10090 10057 		 * A lot of things can change during that time, possibly
10091 10058 		 * including being called on fcp_statec_callback() for
10092 10059 		 * different reasons.  It is not clear the code can handle
10093 10060 		 * that.
10094 10061 */
10095 10062 max_cnt = FCP_MAX_DEVICES;
10096 10063 alloc_cnt = FCP_MAX_DEVICES;
10097 10064 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
10098 10065 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
10099 10066 FC_SUCCESS) {
10100 10067 caddr_t msg;
10101 10068
10102 10069 (void) fc_ulp_error(res, &msg);
10103 10070
10104 10071 /*
10105 10072 			 * This just means the transport is busy,
10106 10073 			 * perhaps building a portmap, so for now
10107 10074 			 * succeed this port attach.  When the
10108 10075 			 * transport has a new map, it'll send us
10109 10076 			 * a state change.
10110 10077 */
10111 10078 fcp_log(CE_WARN, pptr->port_dip,
10112 10079 "!failed to get port map : %s", msg);
10113 10080
10114 10081 res = DDI_SUCCESS;
10115 10082 break; /* go return result */
10116 10083 }
10117 10084 if (max_cnt > alloc_cnt) {
10118 10085 alloc_cnt = max_cnt;
10119 10086 }
10120 10087
10121 10088 /*
10122 10089 * We are now going to call fcp_statec_callback() ourselves.
10123 10090 * By issuing this call we are trying to kick off the enumera-
10124 10091 * tion process.
10125 10092 */
10126 10093 /*
10127 10094 * let the state change callback do the SCSI device
10128 10095 * discovery and create the devinfos
10129 10096 */
10130 10097 fcp_statec_callback(ulph, pptr->port_fp_handle,
10131 10098 pptr->port_phys_state, pptr->port_topology, tmp_list,
10132 10099 max_cnt, pptr->port_id);
10133 10100
10134 10101 res = DDI_SUCCESS;
10135 10102 break;
10136 10103 }
10137 10104
10138 10105 default:
10139 10106 /* unknown port state */
10140 10107 fcp_log(CE_WARN, pptr->port_dip,
10141 10108 "!fcp%d: invalid port state at attach=0x%x",
10142 10109 instance, pptr->port_phys_state);
10143 10110
10144 10111 mutex_enter(&pptr->port_mutex);
10145 10112 pptr->port_phys_state = FCP_STATE_OFFLINE;
10146 10113 mutex_exit(&pptr->port_mutex);
10147 10114
10148 10115 res = DDI_SUCCESS;
10149 10116 break;
10150 10117 }
10151 10118
10152 10119 /* free temp list if used */
10153 10120 if (tmp_list != NULL) {
10154 10121 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10155 10122 }
10156 10123
10157 10124 /* note the attach time */
10158 10125 pptr->port_attach_time = ddi_get_lbolt64();
10159 10126
10160 10127 /* all done */
10161 10128 return (res);
10162 10129
10163 10130 /* a failure we have to clean up after */
10164 10131 fail:
10165 10132 fcp_log(CE_WARN, pptr->port_dip, "!failed to attach to port");
10166 10133
10167 10134 if (soft_state_linked) {
10168 10135 /* remove this fcp_port from the linked list */
10169 10136 (void) fcp_soft_state_unlink(pptr);
10170 10137 }
10171 10138
10172 10139 /* unbind and free event set */
10173 10140 if (pptr->port_ndi_event_hdl) {
10174 10141 if (event_bind) {
10175 10142 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10176 10143 &pptr->port_ndi_events, NDI_SLEEP);
10177 10144 }
10178 10145 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10179 10146 }
10180 10147
10181 10148 if (pptr->port_ndi_event_defs) {
10182 10149 (void) kmem_free(pptr->port_ndi_event_defs,
10183 10150 sizeof (fcp_ndi_event_defs));
10184 10151 }
10185 10152
10186 10153 /*
10187 10154 * Clean up mpxio stuff
10188 10155 */
10189 10156 if (pptr->port_mpxio) {
10190 10157 (void) mdi_phci_unregister(pptr->port_dip, 0);
10191 10158 pptr->port_mpxio--;
10192 10159 }
10193 10160
10194 10161 /* undo SCSI HBA setup */
10195 10162 if (hba_attached) {
10196 10163 (void) scsi_hba_detach(pptr->port_dip);
10197 10164 }
10198 10165 if (pptr->port_tran != NULL) {
10199 10166 scsi_hba_tran_free(pptr->port_tran);
10200 10167 }
10201 10168
10202 10169 mutex_enter(&fcp_global_mutex);
10203 10170
10204 10171 /*
10205 10172 	 * We check soft_state_linked because it is incremented right before
10206 10173 	 * we increment fcp_watchdog_init.  Therefore, we know that if
10207 10174 	 * soft_state_linked is still FALSE, we do not want to decrement
10208 10175 	 * fcp_watchdog_init or possibly call untimeout.
10209 10176 */
10210 10177
10211 10178 if (soft_state_linked) {
10212 10179 if (--fcp_watchdog_init == 0) {
10213 10180 timeout_id_t tid = fcp_watchdog_id;
10214 10181
10215 10182 mutex_exit(&fcp_global_mutex);
10216 10183 (void) untimeout(tid);
10217 10184 } else {
10218 10185 mutex_exit(&fcp_global_mutex);
10219 10186 }
10220 10187 } else {
10221 10188 mutex_exit(&fcp_global_mutex);
10222 10189 }
10223 10190
10224 10191 if (mutex_initted) {
10225 10192 mutex_destroy(&pptr->port_mutex);
10226 10193 mutex_destroy(&pptr->port_pkt_mutex);
10227 10194 }
10228 10195
10229 10196 if (tmp_list != NULL) {
10230 10197 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
10231 10198 }
10232 10199
10233 10200 /* this makes pptr invalid */
10234 10201 ddi_soft_state_free(fcp_softstate, instance);
10235 10202
10236 10203 return (DDI_FAILURE);
10237 10204 }
10238 10205
10239 10206
10240 10207 static int
10241 10208 fcp_handle_port_detach(struct fcp_port *pptr, int flag, int instance)
10242 10209 {
10243 10210 int count = 0;
10244 10211
10245 10212 mutex_enter(&pptr->port_mutex);
10246 10213
10247 10214 /*
10248 10215 * if the port is powered down or suspended, nothing else
10249 10216 * to do; just return.
10250 10217 */
10251 10218 if (flag != FCP_STATE_DETACHING) {
10252 10219 if (pptr->port_state & (FCP_STATE_POWER_DOWN |
10253 10220 FCP_STATE_SUSPENDED)) {
10254 10221 pptr->port_state |= flag;
10255 10222 mutex_exit(&pptr->port_mutex);
10256 10223 return (FC_SUCCESS);
10257 10224 }
10258 10225 }
10259 10226
10260 10227 if (pptr->port_state & FCP_STATE_IN_MDI) {
10261 10228 mutex_exit(&pptr->port_mutex);
10262 10229 return (FC_FAILURE);
10263 10230 }
10264 10231
10265 10232 FCP_TRACE(fcp_logq, pptr->port_instbuf,
10266 10233 fcp_trace, FCP_BUF_LEVEL_2, 0,
10267 10234 "fcp_handle_port_detach: port is detaching");
10268 10235
10269 10236 pptr->port_state |= flag;
10270 10237
10271 10238 /*
10272 10239 	 * Wait for any ongoing reconfig/ipkt to complete; that
10273 10240 	 * ensures that freeing the targets/luns is safe.
10274 10241 	 * No more references to this port should happen from statec/ioctl
10275 10242 	 * after that, as it was removed from the global port list.
10276 10243 */
10277 10244 while (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10278 10245 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10279 10246 /*
10280 10247 * Let's give sufficient time for reconfig/ipkt
10281 10248 * to complete.
10282 10249 */
10283 10250 if (count++ >= FCP_ICMD_DEADLINE) {
10284 10251 break;
10285 10252 }
10286 10253 mutex_exit(&pptr->port_mutex);
10287 10254 delay(drv_usectohz(1000000));
10288 10255 mutex_enter(&pptr->port_mutex);
10289 10256 }
10290 10257
10291 10258 /*
10292 10259 * if the driver is still busy then fail to
10293 10260 * suspend/power down.
10294 10261 */
10295 10262 if (pptr->port_tmp_cnt || pptr->port_ipkt_cnt ||
10296 10263 (pptr->port_state & FCP_STATE_IN_WATCHDOG)) {
10297 10264 pptr->port_state &= ~flag;
10298 10265 mutex_exit(&pptr->port_mutex);
10299 10266 return (FC_FAILURE);
10300 10267 }
10301 10268
10302 10269 if (flag == FCP_STATE_DETACHING) {
10303 10270 pptr = fcp_soft_state_unlink(pptr);
10304 10271 ASSERT(pptr != NULL);
10305 10272 }
10306 10273
10307 10274 pptr->port_link_cnt++;
10308 10275 pptr->port_state |= FCP_STATE_OFFLINE;
10309 10276 pptr->port_state &= ~(FCP_STATE_ONLINING | FCP_STATE_ONLINE);
10310 10277
10311 10278 fcp_update_state(pptr, (FCP_LUN_BUSY | FCP_LUN_MARK),
10312 10279 FCP_CAUSE_LINK_DOWN);
10313 10280 mutex_exit(&pptr->port_mutex);
10314 10281
10315 10282 	/* kill the watchdog timer if we're the last */
10316 10283 mutex_enter(&fcp_global_mutex);
10317 10284 if (--fcp_watchdog_init == 0) {
10318 10285 timeout_id_t tid = fcp_watchdog_id;
10319 10286 mutex_exit(&fcp_global_mutex);
10320 10287 (void) untimeout(tid);
10321 10288 } else {
10322 10289 mutex_exit(&fcp_global_mutex);
10323 10290 }
10324 10291
10325 10292 /* clean up the port structures */
10326 10293 if (flag == FCP_STATE_DETACHING) {
10327 10294 fcp_cleanup_port(pptr, instance);
10328 10295 }
10329 10296
10330 10297 return (FC_SUCCESS);
10331 10298 }
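/*
 * Editorial note (not part of the driver source): the wait loop above polls
 * once a second (delay(drv_usectohz(1000000))) and gives up after
 * FCP_ICMD_DEADLINE iterations, so the detach/suspend path waits at most
 * FCP_ICMD_DEADLINE seconds for outstanding reconfig/internal packet
 * activity before returning FC_FAILURE.
 */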
10332 10299
10333 10300
10334 10301 static void
10335 10302 fcp_cleanup_port(struct fcp_port *pptr, int instance)
10336 10303 {
10337 10304 ASSERT(pptr != NULL);
10338 10305
10339 10306 /* unbind and free event set */
10340 10307 if (pptr->port_ndi_event_hdl) {
10341 10308 (void) ndi_event_unbind_set(pptr->port_ndi_event_hdl,
10342 10309 &pptr->port_ndi_events, NDI_SLEEP);
10343 10310 (void) ndi_event_free_hdl(pptr->port_ndi_event_hdl);
10344 10311 }
10345 10312
10346 10313 if (pptr->port_ndi_event_defs) {
10347 10314 (void) kmem_free(pptr->port_ndi_event_defs,
10348 10315 sizeof (fcp_ndi_event_defs));
10349 10316 }
10350 10317
10351 10318 /* free the lun/target structures and devinfos */
10352 10319 fcp_free_targets(pptr);
10353 10320
10354 10321 /*
10355 10322 * Clean up mpxio stuff
10356 10323 */
10357 10324 if (pptr->port_mpxio) {
10358 10325 (void) mdi_phci_unregister(pptr->port_dip, 0);
10359 10326 pptr->port_mpxio--;
10360 10327 }
10361 10328
10362 10329 /* clean up SCSA stuff */
10363 10330 (void) scsi_hba_detach(pptr->port_dip);
10364 10331 if (pptr->port_tran != NULL) {
10365 10332 scsi_hba_tran_free(pptr->port_tran);
10366 10333 }
10367 10334
10368 10335 #ifdef KSTATS_CODE
10369 10336 /* clean up kstats */
10370 10337 if (pptr->fcp_ksp != NULL) {
10371 10338 kstat_delete(pptr->fcp_ksp);
10372 10339 }
10373 10340 #endif
10374 10341
10375 10342 /* clean up soft state mutexes/condition variables */
10376 10343 mutex_destroy(&pptr->port_mutex);
10377 10344 mutex_destroy(&pptr->port_pkt_mutex);
10378 10345
10379 10346 /* all done with soft state */
10380 10347 ddi_soft_state_free(fcp_softstate, instance);
10381 10348 }
10382 10349
10383 10350 /*
10384 10351 * Function: fcp_kmem_cache_constructor
10385 10352 *
10386 10353 * Description: This function allocates and initializes the resources required
10387 10354  *		to build a scsi_pkt structure for the target driver.  The result
10388 10355 * of the allocation and initialization will be cached in the
10389 10356 * memory cache. As DMA resources may be allocated here, that
10390 10357 * means DMA resources will be tied up in the cache manager.
10391 10358 * This is a tradeoff that has been made for performance reasons.
10392 10359 *
10393 10360 * Argument: *buf Memory to preinitialize.
10394 10361 * *arg FCP port structure (fcp_port).
10395 10362 * kmflags Value passed to kmem_cache_alloc() and
10396 10363 * propagated to the constructor.
10397 10364 *
10398 10365 * Return Value: 0 Allocation/Initialization was successful.
10399 10366 * -1 Allocation or Initialization failed.
10400 10367 *
10401 10368 *
10402 10369 * If the returned value is 0, the buffer is initialized like this:
10403 10370 *
10404 10371 * +================================+
10405 10372 * +----> | struct scsi_pkt |
10406 10373 * | | |
10407 10374 * | +--- | pkt_ha_private |
10408 10375 * | | | |
10409 10376 * | | +================================+
10410 10377 * | |
10411 10378 * | | +================================+
10412 10379 * | +--> | struct fcp_pkt | <---------+
10413 10380 * | | | |
10414 10381 * +----- | cmd_pkt | |
10415 10382 * | cmd_fp_pkt | ---+ |
10416 10383 * +-------->| cmd_fcp_rsp[] | | |
10417 10384 * | +--->| cmd_fcp_cmd[] | | |
10418 10385 * | | |--------------------------------| | |
10419 10386 * | | | struct fc_packet | <--+ |
10420 10387 * | | | | |
10421 10388 * | | | pkt_ulp_private | ----------+
10422 10389 * | | | pkt_fca_private | -----+
10423 10390 * | | | pkt_data_cookie | ---+ |
10424 10391 * | | | pkt_cmdlen | | |
10425 10392 * | |(a) | pkt_rsplen | | |
10426 10393 * | +----| .......... pkt_cmd ........... | ---|-|---------------+
10427 10394 * | (b) | pkt_cmd_cookie | ---|-|----------+ |
10428 10395 * +---------| .......... pkt_resp .......... | ---|-|------+ | |
10429 10396 * | pkt_resp_cookie | ---|-|--+ | | |
10430 10397 * | pkt_cmd_dma | | | | | | |
10431 10398 * | pkt_cmd_acc | | | | | | |
10432 10399 * +================================+ | | | | | |
10433 10400 * | dma_cookies | <--+ | | | | |
10434 10401 * | | | | | | |
10435 10402 * +================================+ | | | | |
10436 10403 * | fca_private | <----+ | | | |
10437 10404 * | | | | | |
10438 10405 * +================================+ | | | |
10439 10406 * | | | |
10440 10407 * | | | |
10441 10408 * +================================+ (d) | | | |
10442 10409 * | fcp_resp cookies | <-------+ | | |
10443 10410 * | | | | |
10444 10411 * +================================+ | | |
10445 10412 * | | |
10446 10413 * +================================+ (d) | | |
10447 10414 * | fcp_resp | <-----------+ | |
10448 10415 * | (DMA resources associated) | | |
10449 10416 * +================================+ | |
10450 10417 * | |
10451 10418 * | |
10452 10419 * | |
10453 10420 * +================================+ (c) | |
10454 10421 * | fcp_cmd cookies | <---------------+ |
10455 10422 * | | |
10456 10423 * +================================+ |
10457 10424 * |
10458 10425 * +================================+ (c) |
10459 10426 * | fcp_cmd | <--------------------+
10460 10427 * | (DMA resources associated) |
10461 10428 * +================================+
10462 10429 *
10463 10430 * (a) Only if DMA is NOT used for the FCP_CMD buffer.
10464 10431 * (b) Only if DMA is NOT used for the FCP_RESP buffer
10465 10432 * (c) Only if DMA is used for the FCP_CMD buffer.
10466 10433 * (d) Only if DMA is used for the FCP_RESP buffer
10467 10434 */
10468 10435 static int
10469 10436 fcp_kmem_cache_constructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran,
10470 10437 int kmflags)
10471 10438 {
10472 10439 struct fcp_pkt *cmd;
10473 10440 struct fcp_port *pptr;
10474 10441 fc_packet_t *fpkt;
10475 10442
10476 10443 pptr = (struct fcp_port *)tran->tran_hba_private;
10477 10444 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
10478 10445 bzero(cmd, tran->tran_hba_len);
10479 10446
10480 10447 cmd->cmd_pkt = pkt;
10481 10448 pkt->pkt_cdbp = cmd->cmd_fcp_cmd.fcp_cdb;
10482 10449 fpkt = (fc_packet_t *)&cmd->cmd_fc_packet;
10483 10450 cmd->cmd_fp_pkt = fpkt;
10484 10451
10485 10452 cmd->cmd_pkt->pkt_ha_private = (opaque_t)cmd;
10486 10453 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
10487 10454 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
10488 10455 sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
10489 10456
10490 10457 fpkt->pkt_data_cookie = (ddi_dma_cookie_t *)((caddr_t)cmd +
10491 10458 sizeof (struct fcp_pkt));
10492 10459
10493 10460 fpkt->pkt_cmdlen = sizeof (struct fcp_cmd);
10494 10461 fpkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
10495 10462
10496 10463 if (pptr->port_fcp_dma == FC_NO_DVMA_SPACE) {
10497 10464 /*
10498 10465 * The underlying HBA doesn't want to DMA the fcp_cmd or
10499 10466 * fcp_resp. The transfer of information will be done by
10500 10467 * bcopy.
10501 10468 		 * The naming of the flag (which is actually a value) is
10502 10469 		 * unfortunate: FC_NO_DVMA_SPACE doesn't mean "NO VIRTUAL
10503 10470 		 * DMA" but rather "NO DMA".
10504 10471 */
10505 10472 fpkt->pkt_resp_acc = fpkt->pkt_cmd_acc = NULL;
10506 10473 fpkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
10507 10474 fpkt->pkt_resp = cmd->cmd_fcp_rsp;
10508 10475 } else {
10509 10476 /*
10510 10477 		 * The underlying HBA will DMA the fcp_cmd and fcp_resp
10511 10478 		 * buffers.  A buffer is allocated for each one using the
10512 10479 		 * ddi_dma_* interfaces.
10513 10480 */
10514 10481 if (fcp_alloc_cmd_resp(pptr, fpkt, kmflags) != FC_SUCCESS) {
10515 10482 return (-1);
10516 10483 }
10517 10484 }
10518 10485
10519 10486 return (0);
10520 10487 }
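/*
 * Editorial sketch (not part of the driver source): how the single
 * tran_hba_len allocation set up in the port attach code above is carved
 * up by the constructor ("base" is a hypothetical local name):
 *
 *	caddr_t base = (caddr_t)pkt->pkt_ha_private;
 *	struct fcp_pkt *cmd = (struct fcp_pkt *)base;
 *	ddi_dma_cookie_t *cookies =
 *	    (ddi_dma_cookie_t *)(base + sizeof (struct fcp_pkt));
 *	opaque_t fca_private = (opaque_t)(base +
 *	    sizeof (struct fcp_pkt) + pptr->port_dmacookie_sz);
 *
 * which matches tran_hba_len = port_priv_pkt_len + sizeof (struct fcp_pkt)
 * + port_dmacookie_sz set earlier in the attach path.
 */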
10521 10488
10522 10489 /*
10523 10490 * Function: fcp_kmem_cache_destructor
10524 10491 *
10525 10492 * Description: Called by the destructor of the cache managed by SCSA.
10526 10493 * All the resources pre-allocated in fcp_pkt_constructor
10527 10494 * and the data also pre-initialized in fcp_pkt_constructor
10528 10495 * are freed and uninitialized here.
10529 10496 *
10530 10497 * Argument: *buf Memory to uninitialize.
10531 10498 * *arg FCP port structure (fcp_port).
10532 10499 *
10533 10500 * Return Value: None
10534 10501 *
10535 10502 * Context: kernel
10536 10503 */
10537 10504 static void
10538 10505 fcp_kmem_cache_destructor(struct scsi_pkt *pkt, scsi_hba_tran_t *tran)
10539 10506 {
10540 10507 struct fcp_pkt *cmd;
10541 10508 struct fcp_port *pptr;
10542 10509
10543 10510 pptr = (struct fcp_port *)(tran->tran_hba_private);
10544 10511 cmd = pkt->pkt_ha_private;
10545 10512
10546 10513 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
10547 10514 /*
10548 10515 * If DMA was used to transfer the FCP_CMD and FCP_RESP, the
10549 10516 * buffer and DMA resources allocated to do so are released.
10550 10517 */
10551 10518 fcp_free_cmd_resp(pptr, cmd->cmd_fp_pkt);
10552 10519 }
10553 10520 }
10554 10521
10555 10522 /*
10556 10523 * Function: fcp_alloc_cmd_resp
10557 10524 *
10558 10525  * Description: This function allocates an FCP_CMD and FCP_RESP buffer that
10559 10526 * will be DMAed by the HBA. The buffer is allocated applying
10560 10527 * the DMA requirements for the HBA. The buffers allocated will
10561 10528 * also be bound. DMA resources are allocated in the process.
10562 10529 * They will be released by fcp_free_cmd_resp().
10563 10530 *
10564 10531 * Argument: *pptr FCP port.
10565 10532 * *fpkt fc packet for which the cmd and resp packet should be
10566 10533 * allocated.
10567 10534 * flags Allocation flags.
10568 10535 *
10569 10536 * Return Value: FC_FAILURE
10570 10537 * FC_SUCCESS
10571 10538 *
10572 10539 * Context: User or Kernel context only if flags == KM_SLEEP.
10573 10540  *		Interrupt context if KM_SLEEP is not specified.
10574 10541 */
10575 10542 static int
10576 10543 fcp_alloc_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt, int flags)
10577 10544 {
10578 10545 int rval;
10579 10546 int cmd_len;
10580 10547 int resp_len;
10581 10548 ulong_t real_len;
10582 10549 int (*cb) (caddr_t);
10583 10550 ddi_dma_cookie_t pkt_cookie;
10584 10551 ddi_dma_cookie_t *cp;
10585 10552 uint32_t cnt;
10586 10553
10587 10554 cb = (flags == KM_SLEEP) ? DDI_DMA_SLEEP : DDI_DMA_DONTWAIT;
10588 10555
10589 10556 cmd_len = fpkt->pkt_cmdlen;
10590 10557 resp_len = fpkt->pkt_rsplen;
10591 10558
10592 10559 ASSERT(fpkt->pkt_cmd_dma == NULL);
10593 10560
10594 10561 /* Allocation of a DMA handle used in subsequent calls. */
10595 10562 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_cmd_dma_attr,
10596 10563 cb, NULL, &fpkt->pkt_cmd_dma) != DDI_SUCCESS) {
10597 10564 return (FC_FAILURE);
10598 10565 }
10599 10566
10600 10567 /* A buffer is allocated that satisfies the DMA requirements. */
10601 10568 rval = ddi_dma_mem_alloc(fpkt->pkt_cmd_dma, cmd_len,
10602 10569 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10603 10570 (caddr_t *)&fpkt->pkt_cmd, &real_len, &fpkt->pkt_cmd_acc);
10604 10571
10605 10572 if (rval != DDI_SUCCESS) {
10606 10573 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10607 10574 return (FC_FAILURE);
10608 10575 }
10609 10576
10610 10577 if (real_len < cmd_len) {
10611 10578 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10612 10579 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10613 10580 return (FC_FAILURE);
10614 10581 }
10615 10582
10616 10583 /* The buffer allocated is DMA bound. */
10617 10584 rval = ddi_dma_addr_bind_handle(fpkt->pkt_cmd_dma, NULL,
10618 10585 fpkt->pkt_cmd, real_len, DDI_DMA_WRITE | DDI_DMA_CONSISTENT,
10619 10586 cb, NULL, &pkt_cookie, &fpkt->pkt_cmd_cookie_cnt);
10620 10587
10621 10588 if (rval != DDI_DMA_MAPPED) {
10622 10589 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10623 10590 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10624 10591 return (FC_FAILURE);
10625 10592 }
10626 10593
10627 10594 if (fpkt->pkt_cmd_cookie_cnt >
10628 10595 pptr->port_cmd_dma_attr.dma_attr_sgllen) {
10629 10596 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10630 10597 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10631 10598 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10632 10599 return (FC_FAILURE);
10633 10600 }
10634 10601
10635 10602 ASSERT(fpkt->pkt_cmd_cookie_cnt != 0);
10636 10603
10637 10604 /*
10638 10605 * The buffer where the scatter/gather list is going to be built is
10639 10606 * allocated.
10640 10607 */
10641 10608 cp = fpkt->pkt_cmd_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10642 10609 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie),
10643 10610 KM_NOSLEEP);
10644 10611
10645 10612 if (cp == NULL) {
10646 10613 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10647 10614 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10648 10615 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10649 10616 return (FC_FAILURE);
10650 10617 }
10651 10618
10652 10619 /*
10653 10620 * The scatter/gather list for the buffer we just allocated is built
10654 10621 * here.
10655 10622 */
10656 10623 *cp = pkt_cookie;
10657 10624 cp++;
10658 10625
10659 10626 for (cnt = 1; cnt < fpkt->pkt_cmd_cookie_cnt; cnt++, cp++) {
10660 10627 ddi_dma_nextcookie(fpkt->pkt_cmd_dma,
10661 10628 &pkt_cookie);
10662 10629 *cp = pkt_cookie;
10663 10630 }
10664 10631
10665 10632 ASSERT(fpkt->pkt_resp_dma == NULL);
10666 10633 if (ddi_dma_alloc_handle(pptr->port_dip, &pptr->port_resp_dma_attr,
10667 10634 cb, NULL, &fpkt->pkt_resp_dma) != DDI_SUCCESS) {
10668 10635 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10669 10636 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10670 10637 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10671 10638 return (FC_FAILURE);
10672 10639 }
10673 10640
10674 10641 rval = ddi_dma_mem_alloc(fpkt->pkt_resp_dma, resp_len,
10675 10642 &pptr->port_dma_acc_attr, DDI_DMA_CONSISTENT, cb, NULL,
10676 10643 (caddr_t *)&fpkt->pkt_resp, &real_len,
10677 10644 &fpkt->pkt_resp_acc);
10678 10645
10679 10646 if (rval != DDI_SUCCESS) {
10680 10647 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10681 10648 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10682 10649 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10683 10650 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10684 10651 kmem_free(fpkt->pkt_cmd_cookie,
10685 10652 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10686 10653 return (FC_FAILURE);
10687 10654 }
10688 10655
10689 10656 if (real_len < resp_len) {
10690 10657 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10691 10658 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10692 10659 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10693 10660 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10694 10661 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10695 10662 kmem_free(fpkt->pkt_cmd_cookie,
10696 10663 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10697 10664 return (FC_FAILURE);
10698 10665 }
10699 10666
10700 10667 rval = ddi_dma_addr_bind_handle(fpkt->pkt_resp_dma, NULL,
10701 10668 fpkt->pkt_resp, real_len, DDI_DMA_READ | DDI_DMA_CONSISTENT,
10702 10669 cb, NULL, &pkt_cookie, &fpkt->pkt_resp_cookie_cnt);
10703 10670
10704 10671 if (rval != DDI_DMA_MAPPED) {
10705 10672 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10706 10673 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10707 10674 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10708 10675 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10709 10676 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10710 10677 kmem_free(fpkt->pkt_cmd_cookie,
10711 10678 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10712 10679 return (FC_FAILURE);
10713 10680 }
10714 10681
10715 10682 if (fpkt->pkt_resp_cookie_cnt >
10716 10683 pptr->port_resp_dma_attr.dma_attr_sgllen) {
10717 10684 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10718 10685 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10719 10686 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10720 10687 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10721 10688 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10722 10689 kmem_free(fpkt->pkt_cmd_cookie,
10723 10690 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10724 10691 return (FC_FAILURE);
10725 10692 }
10726 10693
10727 10694 ASSERT(fpkt->pkt_resp_cookie_cnt != 0);
10728 10695
10729 10696 cp = fpkt->pkt_resp_cookie = (ddi_dma_cookie_t *)kmem_alloc(
10730 10697 fpkt->pkt_resp_cookie_cnt * sizeof (pkt_cookie),
10731 10698 KM_NOSLEEP);
10732 10699
10733 10700 if (cp == NULL) {
10734 10701 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10735 10702 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10736 10703 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10737 10704 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10738 10705 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10739 10706 kmem_free(fpkt->pkt_cmd_cookie,
10740 10707 fpkt->pkt_cmd_cookie_cnt * sizeof (pkt_cookie));
10741 10708 return (FC_FAILURE);
10742 10709 }
10743 10710
10744 10711 *cp = pkt_cookie;
10745 10712 cp++;
10746 10713
10747 10714 for (cnt = 1; cnt < fpkt->pkt_resp_cookie_cnt; cnt++, cp++) {
10748 10715 ddi_dma_nextcookie(fpkt->pkt_resp_dma,
10749 10716 &pkt_cookie);
10750 10717 *cp = pkt_cookie;
10751 10718 }
10752 10719
10753 10720 return (FC_SUCCESS);
10754 10721 }
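/*
 * Editorial sketch (not part of the driver source): the generic DDI DMA
 * sequence fcp_alloc_cmd_resp() applies to both the FCP_CMD and FCP_RESP
 * buffers (handle and variable names here are placeholders):
 *
 *	ddi_dma_alloc_handle(dip, &dma_attr, cb, NULL, &hdl);
 *	ddi_dma_mem_alloc(hdl, len, &acc_attr, DDI_DMA_CONSISTENT,
 *	    cb, NULL, &kaddr, &real_len, &acc_hdl);
 *	ddi_dma_addr_bind_handle(hdl, NULL, kaddr, real_len,
 *	    dir | DDI_DMA_CONSISTENT, cb, NULL, &cookie, &ccount);
 *	(walk any additional cookies with ddi_dma_nextcookie())
 *
 * and the teardown, in reverse order, as used on the error paths and in
 * fcp_free_cmd_resp():
 *
 *	(void) ddi_dma_unbind_handle(hdl);
 *	ddi_dma_mem_free(&acc_hdl);
 *	ddi_dma_free_handle(&hdl);
 *
 * "dir" is DDI_DMA_WRITE for the command buffer (host to device) and
 * DDI_DMA_READ for the response buffer (device to host).
 */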
10755 10722
10756 10723 /*
10757 10724 * Function: fcp_free_cmd_resp
10758 10725 *
10759 10726 * Description: This function releases the FCP_CMD and FCP_RESP buffer
10760 10727 * allocated by fcp_alloc_cmd_resp() and all the resources
10761 10728 * associated with them. That includes the DMA resources and the
10762 10729 * buffer allocated for the cookies of each one of them.
10763 10730 *
10764 10731 * Argument: *pptr FCP port context.
10765 10732 * *fpkt fc packet containing the cmd and resp packet
10766 10733 * to be released.
10767 10734 *
10768 10735 * Return Value: None
10769 10736 *
10770 10737 * Context: Interrupt, User and Kernel context.
10771 10738 */
10772 10739 /* ARGSUSED */
10773 10740 static void
10774 10741 fcp_free_cmd_resp(struct fcp_port *pptr, fc_packet_t *fpkt)
10775 10742 {
10776 10743 ASSERT(fpkt->pkt_resp_dma != NULL && fpkt->pkt_cmd_dma != NULL);
10777 10744
10778 10745 if (fpkt->pkt_resp_dma) {
10779 10746 (void) ddi_dma_unbind_handle(fpkt->pkt_resp_dma);
10780 10747 ddi_dma_mem_free(&fpkt->pkt_resp_acc);
10781 10748 ddi_dma_free_handle(&fpkt->pkt_resp_dma);
10782 10749 }
10783 10750
10784 10751 if (fpkt->pkt_resp_cookie) {
10785 10752 kmem_free(fpkt->pkt_resp_cookie,
10786 10753 fpkt->pkt_resp_cookie_cnt * sizeof (ddi_dma_cookie_t));
10787 10754 fpkt->pkt_resp_cookie = NULL;
10788 10755 }
10789 10756
10790 10757 if (fpkt->pkt_cmd_dma) {
10791 10758 (void) ddi_dma_unbind_handle(fpkt->pkt_cmd_dma);
10792 10759 ddi_dma_mem_free(&fpkt->pkt_cmd_acc);
10793 10760 ddi_dma_free_handle(&fpkt->pkt_cmd_dma);
10794 10761 }
10795 10762
10796 10763 if (fpkt->pkt_cmd_cookie) {
10797 10764 kmem_free(fpkt->pkt_cmd_cookie,
10798 10765 fpkt->pkt_cmd_cookie_cnt * sizeof (ddi_dma_cookie_t));
10799 10766 fpkt->pkt_cmd_cookie = NULL;
10800 10767 }
10801 10768 }
10802 10769
10803 10770
10804 10771 /*
10805 10772 * called by the transport to do our own target initialization
10806 10773 *
10807 10774 * can acquire and release the global mutex
10808 10775 */
10809 10776 /* ARGSUSED */
10810 10777 static int
10811 10778 fcp_phys_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10812 10779 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10813 10780 {
10814 10781 uchar_t *bytes;
10815 10782 uint_t nbytes;
10816 10783 uint16_t lun_num;
10817 10784 struct fcp_tgt *ptgt;
10818 10785 struct fcp_lun *plun;
10819 10786 struct fcp_port *pptr = (struct fcp_port *)
10820 10787 hba_tran->tran_hba_private;
10821 10788
10822 10789 ASSERT(pptr != NULL);
10823 10790
10824 10791 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10825 10792 FCP_BUF_LEVEL_8, 0,
10826 10793 "fcp_phys_tgt_init: called for %s (instance %d)",
10827 10794 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
10828 10795
10829 10796 /* get our port WWN property */
10830 10797 bytes = NULL;
10831 10798 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10832 10799 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10833 10800 (nbytes != FC_WWN_SIZE)) {
10834 10801 /* no port WWN property */
10835 10802 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10836 10803 FCP_BUF_LEVEL_8, 0,
10837 10804 "fcp_phys_tgt_init: Returning DDI_NOT_WELL_FORMED"
10838 10805 " for %s (instance %d): bytes=%p nbytes=%x",
10839 10806 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip), bytes,
10840 10807 nbytes);
10841 10808
10842 10809 if (bytes != NULL) {
10843 10810 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10844 10811 }
10845 10812
10846 10813 return (DDI_NOT_WELL_FORMED);
10847 10814 }
10848 10815 ASSERT(bytes != NULL);
10849 10816
10850 10817 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10851 10818 LUN_PROP, 0xFFFF);
10852 10819 if (lun_num == 0xFFFF) {
10853 10820 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10854 10821 FCP_BUF_LEVEL_8, 0,
10855 10822 "fcp_phys_tgt_init: Returning DDI_FAILURE:lun"
10856 10823 " for %s (instance %d)", ddi_get_name(tgt_dip),
10857 10824 ddi_get_instance(tgt_dip));
10858 10825
10859 10826 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10860 10827 return (DDI_NOT_WELL_FORMED);
10861 10828 }
10862 10829
10863 10830 mutex_enter(&pptr->port_mutex);
10864 10831 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10865 10832 mutex_exit(&pptr->port_mutex);
10866 10833 FCP_DTRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
10867 10834 FCP_BUF_LEVEL_8, 0,
10868 10835 "fcp_phys_tgt_init: Returning DDI_FAILURE: No Lun"
10869 10836 " for %s (instance %d)", ddi_get_name(tgt_dip),
10870 10837 ddi_get_instance(tgt_dip));
10871 10838
10872 10839 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10873 10840 return (DDI_FAILURE);
10874 10841 }
10875 10842
10876 10843 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10877 10844 FC_WWN_SIZE) == 0);
10878 10845 ASSERT(plun->lun_num == lun_num);
10879 10846
10880 10847 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10881 10848
10882 10849 ptgt = plun->lun_tgt;
10883 10850
10884 10851 mutex_enter(&ptgt->tgt_mutex);
10885 10852 plun->lun_tgt_count++;
10886 10853 scsi_device_hba_private_set(sd, plun);
10887 10854 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10888 10855 plun->lun_sd = sd;
10889 10856 mutex_exit(&ptgt->tgt_mutex);
10890 10857 mutex_exit(&pptr->port_mutex);
10891 10858
10892 10859 return (DDI_SUCCESS);
10893 10860 }
10894 10861
10895 10862 /*ARGSUSED*/
10896 10863 static int
10897 10864 fcp_virt_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10898 10865 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10899 10866 {
10900 10867 uchar_t *bytes;
10901 10868 uint_t nbytes;
10902 10869 uint16_t lun_num;
10903 10870 struct fcp_tgt *ptgt;
10904 10871 struct fcp_lun *plun;
10905 10872 struct fcp_port *pptr = (struct fcp_port *)
10906 10873 hba_tran->tran_hba_private;
10907 10874 child_info_t *cip;
10908 10875
10909 10876 ASSERT(pptr != NULL);
10910 10877
10911 10878 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10912 10879 fcp_trace, FCP_BUF_LEVEL_8, 0,
10913 10880 "fcp_virt_tgt_init: called for %s (instance %d) (hba_dip %p),"
10914 10881 " (tgt_dip %p)", ddi_get_name(tgt_dip),
10915 10882 ddi_get_instance(tgt_dip), hba_dip, tgt_dip);
10916 10883
10917 10884 cip = (child_info_t *)sd->sd_pathinfo;
10918 10885 if (cip == NULL) {
10919 10886 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10920 10887 fcp_trace, FCP_BUF_LEVEL_8, 0,
10921 10888 "fcp_virt_tgt_init: Returning DDI_NOT_WELL_FORMED"
10922 10889 " for %s (instance %d)", ddi_get_name(tgt_dip),
10923 10890 ddi_get_instance(tgt_dip));
10924 10891
10925 10892 return (DDI_NOT_WELL_FORMED);
10926 10893 }
10927 10894
10928 10895 /* get our port WWN property */
10929 10896 bytes = NULL;
10930 10897 if ((scsi_device_prop_lookup_byte_array(sd, SCSI_DEVICE_PROP_PATH,
10931 10898 PORT_WWN_PROP, &bytes, &nbytes) != DDI_PROP_SUCCESS) ||
10932 10899 (nbytes != FC_WWN_SIZE)) {
10933 10900 if (bytes) {
10934 10901 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10935 10902 }
10936 10903 return (DDI_NOT_WELL_FORMED);
10937 10904 }
10938 10905
10939 10906 ASSERT(bytes != NULL);
10940 10907
10941 10908 lun_num = scsi_device_prop_get_int(sd, SCSI_DEVICE_PROP_PATH,
10942 10909 LUN_PROP, 0xFFFF);
10943 10910 if (lun_num == 0xFFFF) {
10944 10911 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10945 10912 fcp_trace, FCP_BUF_LEVEL_8, 0,
10946 10913 "fcp_virt_tgt_init: Returning DDI_FAILURE:lun"
10947 10914 " for %s (instance %d)", ddi_get_name(tgt_dip),
10948 10915 ddi_get_instance(tgt_dip));
10949 10916
10950 10917 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10951 10918 return (DDI_NOT_WELL_FORMED);
10952 10919 }
10953 10920
10954 10921 mutex_enter(&pptr->port_mutex);
10955 10922 if ((plun = fcp_lookup_lun(pptr, bytes, lun_num)) == NULL) {
10956 10923 mutex_exit(&pptr->port_mutex);
10957 10924 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
10958 10925 fcp_trace, FCP_BUF_LEVEL_8, 0,
10959 10926 "fcp_virt_tgt_init: Returning DDI_FAILURE: No Lun"
10960 10927 " for %s (instance %d)", ddi_get_name(tgt_dip),
10961 10928 ddi_get_instance(tgt_dip));
10962 10929
10963 10930 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10964 10931 return (DDI_FAILURE);
10965 10932 }
10966 10933
10967 10934 ASSERT(bcmp(plun->lun_tgt->tgt_port_wwn.raw_wwn, bytes,
10968 10935 FC_WWN_SIZE) == 0);
10969 10936 ASSERT(plun->lun_num == lun_num);
10970 10937
10971 10938 scsi_device_prop_free(sd, SCSI_DEVICE_PROP_PATH, bytes);
10972 10939
10973 10940 ptgt = plun->lun_tgt;
10974 10941
10975 10942 mutex_enter(&ptgt->tgt_mutex);
10976 10943 plun->lun_tgt_count++;
10977 10944 scsi_device_hba_private_set(sd, plun);
10978 10945 plun->lun_state |= FCP_SCSI_LUN_TGT_INIT;
10979 10946 plun->lun_sd = sd;
10980 10947 mutex_exit(&ptgt->tgt_mutex);
10981 10948 mutex_exit(&pptr->port_mutex);
10982 10949
10983 10950 return (DDI_SUCCESS);
10984 10951 }
10985 10952
10986 10953
10987 10954 /*
10988 10955 * called by the transport to do our own target initialization
10989 10956 *
10990 10957 * can acquire and release the global mutex
10991 10958 */
10992 10959 /* ARGSUSED */
10993 10960 static int
10994 10961 fcp_scsi_tgt_init(dev_info_t *hba_dip, dev_info_t *tgt_dip,
10995 10962 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
10996 10963 {
10997 10964 struct fcp_port *pptr = (struct fcp_port *)
10998 10965 hba_tran->tran_hba_private;
10999 10966 int rval;
11000 10967
11001 10968 ASSERT(pptr != NULL);
11002 10969
11003 10970 /*
11004 10971 * Child node is getting initialized. Look at the mpxio component
11005 10972 * type on the child device to see if this device is mpxio managed
11006 10973 * or not.
11007 10974 */
11008 10975 if (mdi_component_is_client(tgt_dip, NULL) == MDI_SUCCESS) {
11009 10976 rval = fcp_virt_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11010 10977 } else {
11011 10978 rval = fcp_phys_tgt_init(hba_dip, tgt_dip, hba_tran, sd);
11012 10979 }
11013 10980
11014 10981 return (rval);
11015 10982 }
11016 10983
11017 10984
11018 10985 /* ARGSUSED */
11019 10986 static void
11020 10987 fcp_scsi_tgt_free(dev_info_t *hba_dip, dev_info_t *tgt_dip,
11021 10988 scsi_hba_tran_t *hba_tran, struct scsi_device *sd)
11022 10989 {
11023 10990 struct fcp_lun *plun = scsi_device_hba_private_get(sd);
11024 10991 struct fcp_tgt *ptgt;
11025 10992
11026 10993 FCP_DTRACE(fcp_logq, LUN_PORT->port_instbuf,
11027 10994 fcp_trace, FCP_BUF_LEVEL_8, 0,
11028 10995 "fcp_scsi_tgt_free: called for tran %s%d, dev %s%d",
11029 10996 ddi_get_name(hba_dip), ddi_get_instance(hba_dip),
11030 10997 ddi_get_name(tgt_dip), ddi_get_instance(tgt_dip));
11031 10998
11032 10999 if (plun == NULL) {
11033 11000 return;
11034 11001 }
11035 11002 ptgt = plun->lun_tgt;
11036 11003
11037 11004 ASSERT(ptgt != NULL);
11038 11005
11039 11006 mutex_enter(&ptgt->tgt_mutex);
11040 11007 ASSERT(plun->lun_tgt_count > 0);
11041 11008
11042 11009 if (--plun->lun_tgt_count == 0) {
11043 11010 plun->lun_state &= ~FCP_SCSI_LUN_TGT_INIT;
11044 11011 }
11045 11012 plun->lun_sd = NULL;
11046 11013 mutex_exit(&ptgt->tgt_mutex);
11047 11014 }
11048 11015
11049 11016 /*
11050 11017 * Function: fcp_scsi_start
11051 11018 *
11052 11019 * Description: This function is called by the target driver to request a
11053 11020 * command to be sent.
11054 11021 *
11055 11022 * Argument: *ap SCSI address of the device.
11056 11023 * *pkt SCSI packet containing the cmd to send.
11057 11024 *
11058 11025 * Return Value: TRAN_ACCEPT
11059 11026 * TRAN_BUSY
11060 11027 * TRAN_BADPKT
11061 11028 * TRAN_FATAL_ERROR
11062 11029 */
11063 11030 static int
11064 11031 fcp_scsi_start(struct scsi_address *ap, struct scsi_pkt *pkt)
11065 11032 {
11066 11033 struct fcp_port *pptr = ADDR2FCP(ap);
11067 11034 struct fcp_lun *plun = ADDR2LUN(ap);
11068 11035 struct fcp_pkt *cmd = PKT2CMD(pkt);
11069 11036 struct fcp_tgt *ptgt = plun->lun_tgt;
11070 11037 int rval;
11071 11038
11072 11039 /* ensure command isn't already issued */
11073 11040 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
11074 11041
11075 11042 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11076 11043 fcp_trace, FCP_BUF_LEVEL_9, 0,
11077 11044 "fcp_transport Invoked for %x", plun->lun_tgt->tgt_d_id);
11078 11045
11079 11046 /*
11080 11047 * It is strange that we enter the fcp_port mutex and the target
11081 11048 * mutex to check the lun state (which has a mutex of its own).
11082 11049 */
11083 11050 mutex_enter(&pptr->port_mutex);
11084 11051 mutex_enter(&ptgt->tgt_mutex);
11085 11052
11086 11053 /*
11087 11054 * If the device is offline and is not in the process of coming
11088 11055 * online, fail the request.
11089 11056 */
11090 11057
11091 11058 if ((plun->lun_state & FCP_LUN_OFFLINE) &&
11092 11059 !(plun->lun_state & FCP_LUN_ONLINING)) {
11093 11060 mutex_exit(&ptgt->tgt_mutex);
11094 11061 mutex_exit(&pptr->port_mutex);
11095 11062
11096 11063 if (cmd->cmd_fp_pkt->pkt_pd == NULL) {
11097 11064 pkt->pkt_reason = CMD_DEV_GONE;
11098 11065 }
11099 11066
11100 11067 return (TRAN_FATAL_ERROR);
11101 11068 }
11102 11069 cmd->cmd_fp_pkt->pkt_timeout = pkt->pkt_time;
11103 11070
11104 11071 /*
11105 11072 	 * If we are suspended, the kernel is trying to dump, so don't
11106 11073 	 * block, fail or defer requests - send them down right away.
11107 11074 	 * NOTE: If we are in panic (i.e. trying to dump), we can't
11108 11075 	 * assume we have been suspended.  There is hardware such as
11109 11076 	 * the v880 that doesn't do PM.  Thus, the check for
11110 11077 	 * ddi_in_panic.
11111 11078 	 *
11112 11079 	 * If FCP_STATE_IN_CB_DEVC is set, devices are in the process
11113 11080 	 * of changing.  So, if we can queue the packet, do it.  Eventually,
11114 11081 	 * either the device will have gone away or changed and we can fail
11115 11082 	 * the request, or we can proceed if the device didn't change.
11116 11083 	 *
11117 11084 	 * If the pd in the target or the packet is NULL, it's probably
11118 11085 	 * because the device has gone away; we allow the request to be
11119 11086 	 * put on the internal queue here in case the device comes back
11120 11087 	 * within the offline timeout.  fctl will fix up the pd's if the
11121 11088 	 * tgt_pd_handle has gone NULL, while fcp deals with cases where
11122 11089 	 * pkt_pd is NULL.  pkt_pd could be NULL because the device was
11123 11090 	 * disappearing during or since packet initialization.
11124 11091 */
11125 11092
11126 11093 if (((plun->lun_state & FCP_LUN_BUSY) && (!(pptr->port_state &
11127 11094 FCP_STATE_SUSPENDED)) && !ddi_in_panic()) ||
11128 11095 (pptr->port_state & (FCP_STATE_ONLINING | FCP_STATE_IN_CB_DEVC)) ||
11129 11096 (ptgt->tgt_pd_handle == NULL) ||
11130 11097 (cmd->cmd_fp_pkt->pkt_pd == NULL)) {
11131 11098 /*
11132 11099 * If ((LUN is busy AND
11133 11100 * LUN not suspended AND
11134 11101 * The system is not in panic state) OR
11135 11102 * (The port is coming up))
11136 11103 *
11137 11104 		 * We check to see if any of the flags FLAG_NOINTR or
11138 11105 		 * FLAG_NOQUEUE is set.  If one of them is set, the value
11139 11106 		 * returned will be TRAN_BUSY.  If not, the request is queued.
11140 11107 */
11141 11108 mutex_exit(&ptgt->tgt_mutex);
11142 11109 mutex_exit(&pptr->port_mutex);
11143 11110
11144 11111 /* see if using interrupts is allowed (so queueing'll work) */
11145 11112 if (pkt->pkt_flags & FLAG_NOINTR) {
11146 11113 pkt->pkt_resid = 0;
11147 11114 return (TRAN_BUSY);
11148 11115 }
11149 11116 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11150 11117 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11151 11118 fcp_trace, FCP_BUF_LEVEL_9, 0,
11152 11119 "fcp_scsi_start: lun busy for pkt %p", pkt);
11153 11120 return (TRAN_BUSY);
11154 11121 }
11155 11122 #ifdef DEBUG
11156 11123 mutex_enter(&pptr->port_pkt_mutex);
11157 11124 pptr->port_npkts++;
11158 11125 mutex_exit(&pptr->port_pkt_mutex);
11159 11126 #endif /* DEBUG */
11160 11127
11161 11128 		/* go queue up the pkt for later */
11162 11129 fcp_queue_pkt(pptr, cmd);
11163 11130 return (TRAN_ACCEPT);
11164 11131 }
11165 11132 cmd->cmd_state = FCP_PKT_ISSUED;
11166 11133
11167 11134 mutex_exit(&ptgt->tgt_mutex);
11168 11135 mutex_exit(&pptr->port_mutex);
11169 11136
11170 11137 /*
11171 11138 * Now that we released the mutexes, what was protected by them can
11172 11139 * change.
11173 11140 */
11174 11141
11175 11142 /*
11176 11143 * If there is a reconfiguration in progress, wait for it to complete.
11177 11144 */
11178 11145 fcp_reconfig_wait(pptr);
11179 11146
11180 11147 cmd->cmd_timeout = pkt->pkt_time ? fcp_watchdog_time +
11181 11148 pkt->pkt_time : 0;
11182 11149
11183 11150 /* prepare the packet */
11184 11151
11185 11152 fcp_prepare_pkt(pptr, cmd, plun);
11186 11153
11187 11154 if (cmd->cmd_pkt->pkt_time) {
11188 11155 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11189 11156 } else {
11190 11157 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11191 11158 }
11192 11159
11193 11160 /*
11194 11161 * if interrupts aren't allowed (e.g. at dump time) then we'll
11195 11162 * have to do polled I/O
11196 11163 */
11197 11164 if (pkt->pkt_flags & FLAG_NOINTR) {
11198 11165 cmd->cmd_state &= ~FCP_PKT_ISSUED;
11199 11166 return (fcp_dopoll(pptr, cmd));
11200 11167 }
11201 11168
11202 11169 #ifdef DEBUG
11203 11170 mutex_enter(&pptr->port_pkt_mutex);
11204 11171 pptr->port_npkts++;
11205 11172 mutex_exit(&pptr->port_pkt_mutex);
11206 11173 #endif /* DEBUG */
11207 11174
11208 11175 rval = fcp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt, 0);
11209 11176 if (rval == FC_SUCCESS) {
11210 11177 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11211 11178 fcp_trace, FCP_BUF_LEVEL_9, 0,
11212 11179 "fcp_transport success for %x", plun->lun_tgt->tgt_d_id);
11213 11180 return (TRAN_ACCEPT);
11214 11181 }
11215 11182
11216 11183 cmd->cmd_state = FCP_PKT_IDLE;
11217 11184
11218 11185 #ifdef DEBUG
11219 11186 mutex_enter(&pptr->port_pkt_mutex);
11220 11187 pptr->port_npkts--;
11221 11188 mutex_exit(&pptr->port_pkt_mutex);
11222 11189 #endif /* DEBUG */
11223 11190
11224 11191 /*
11225 11192 * For lack of clearer definitions, choose
11226 11193 * between TRAN_BUSY and TRAN_FATAL_ERROR.
11227 11194 */
11228 11195
11229 11196 if (rval == FC_TRAN_BUSY) {
11230 11197 pkt->pkt_resid = 0;
11231 11198 rval = TRAN_BUSY;
11232 11199 } else {
11233 11200 mutex_enter(&ptgt->tgt_mutex);
11234 11201 if (plun->lun_state & FCP_LUN_OFFLINE) {
11235 11202 child_info_t *cip;
11236 11203
11237 11204 mutex_enter(&plun->lun_mutex);
11238 11205 cip = plun->lun_cip;
11239 11206 mutex_exit(&plun->lun_mutex);
11240 11207
11241 11208 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11242 11209 fcp_trace, FCP_BUF_LEVEL_6, 0,
11243 11210 "fcp_transport failed 2 for %x: %x; dip=%p",
11244 11211 plun->lun_tgt->tgt_d_id, rval, cip);
11245 11212
11246 11213 rval = TRAN_FATAL_ERROR;
11247 11214 } else {
11248 11215 if (pkt->pkt_flags & FLAG_NOQUEUE) {
11249 11216 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
11250 11217 fcp_trace, FCP_BUF_LEVEL_9, 0,
11251 11218 "fcp_scsi_start: FC_BUSY for pkt %p",
11252 11219 pkt);
11253 11220 rval = TRAN_BUSY;
11254 11221 } else {
11255 11222 rval = TRAN_ACCEPT;
11256 11223 fcp_queue_pkt(pptr, cmd);
11257 11224 }
11258 11225 }
11259 11226 mutex_exit(&ptgt->tgt_mutex);
11260 11227 }
11261 11228
11262 11229 return (rval);
11263 11230 }
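/*
 * Editorial sketch (not part of the driver source): how a SCSI target
 * driver typically reacts to the TRAN_* values fcp_scsi_start() returns
 * when a packet is submitted through scsi_transport(9F).  The retry policy
 * is purely illustrative, and "example_retry" and "bp" are hypothetical:
 *
 *	rval = scsi_transport(pkt);
 *	if (rval == TRAN_ACCEPT) {
 *		return (0);
 *	} else if (rval == TRAN_BUSY) {
 *		(void) timeout(example_retry, (void *)pkt,
 *		    drv_usectohz(100000));
 *	} else {
 *		bioerror(bp, EIO);
 *		scsi_destroy_pkt(pkt);
 *		biodone(bp);
 *	}
 */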
11264 11231
11265 11232 /*
11266 11233 * called by the transport to abort a packet
11267 11234 */
11268 11235 /*ARGSUSED*/
11269 11236 static int
11270 11237 fcp_scsi_abort(struct scsi_address *ap, struct scsi_pkt *pkt)
11271 11238 {
11272 11239 int tgt_cnt;
11273 11240 struct fcp_port *pptr = ADDR2FCP(ap);
11274 11241 struct fcp_lun *plun = ADDR2LUN(ap);
11275 11242 struct fcp_tgt *ptgt = plun->lun_tgt;
11276 11243
11277 11244 if (pkt == NULL) {
11278 11245 if (ptgt) {
11279 11246 mutex_enter(&ptgt->tgt_mutex);
11280 11247 tgt_cnt = ptgt->tgt_change_cnt;
11281 11248 mutex_exit(&ptgt->tgt_mutex);
11282 11249 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
11283 11250 return (TRUE);
11284 11251 }
11285 11252 }
11286 11253 return (FALSE);
11287 11254 }
11288 11255
11289 11256
11290 11257 /*
11291 11258 * Perform reset
11292 11259 */
11293 11260 int
11294 11261 fcp_scsi_reset(struct scsi_address *ap, int level)
11295 11262 {
11296 11263 int rval = 0;
11297 11264 struct fcp_port *pptr = ADDR2FCP(ap);
11298 11265 struct fcp_lun *plun = ADDR2LUN(ap);
11299 11266 struct fcp_tgt *ptgt = plun->lun_tgt;
11300 11267
11301 11268 if (level == RESET_ALL) {
11302 11269 if (fcp_linkreset(pptr, ap, KM_NOSLEEP) == FC_SUCCESS) {
11303 11270 rval = 1;
11304 11271 }
11305 11272 } else if (level == RESET_TARGET || level == RESET_LUN) {
11306 11273 /*
11307 11274 * If we are in the middle of discovery, return
11308 11275 * SUCCESS as this target will be rediscovered
11309 11276 * anyway
11310 11277 */
11311 11278 mutex_enter(&ptgt->tgt_mutex);
11312 11279 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11313 11280 mutex_exit(&ptgt->tgt_mutex);
11314 11281 return (1);
11315 11282 }
11316 11283 mutex_exit(&ptgt->tgt_mutex);
11317 11284
11318 11285 if (fcp_reset_target(ap, level) == FC_SUCCESS) {
11319 11286 rval = 1;
11320 11287 }
11321 11288 }
11322 11289 return (rval);
11323 11290 }
11324 11291
11325 11292
11326 11293 /*
11327 11294 * called by the framework to get a SCSI capability
11328 11295 */
11329 11296 static int
11330 11297 fcp_scsi_getcap(struct scsi_address *ap, char *cap, int whom)
11331 11298 {
11332 11299 return (fcp_commoncap(ap, cap, 0, whom, 0));
11333 11300 }
11334 11301
11335 11302
11336 11303 /*
11337 11304 * called by the framework to set a SCSI capability
11338 11305 */
11339 11306 static int
11340 11307 fcp_scsi_setcap(struct scsi_address *ap, char *cap, int value, int whom)
11341 11308 {
11342 11309 return (fcp_commoncap(ap, cap, value, whom, 1));
11343 11310 }
11344 11311
11345 11312 /*
11346 11313 * Function: fcp_pkt_setup
11347 11314 *
11348 11315 * Description: This function sets up the scsi_pkt structure passed by the
11349 11316 * caller. This function assumes fcp_pkt_constructor has been
11350 11317 * called previously for the packet passed by the caller. If
11351 11318 * successful this call will have the following results:
11352 11319 *
11353 11320  *		- The resources needed that will be constant throughout
11354 11321  *		  the whole transaction are allocated.
11355 11322  *		- The fields that will be constant throughout the whole
11356 11323  *		  transaction are initialized.
11357 11324 * - The scsi packet will be linked to the LUN structure
11358 11325 * addressed by the transaction.
11359 11326 *
11360 11327 * Argument:
11361 11328 * *pkt Pointer to a scsi_pkt structure.
11362 11329  *		callback	SLEEP_FUNC if the caller can wait for resources.
11363 11330  *		arg		Argument to callback; unused here (ARGSUSED).
11364 11331 *
11365 11332 * Return Value: 0 Success
11366 11333 * !0 Failure
11367 11334 *
11368 11335 * Context: Kernel context or interrupt context
11369 11336 */
11370 11337 /* ARGSUSED */
11371 11338 static int
11372 11339 fcp_pkt_setup(struct scsi_pkt *pkt,
11373 11340 int (*callback)(caddr_t arg),
11374 11341 caddr_t arg)
11375 11342 {
11376 11343 struct fcp_pkt *cmd;
11377 11344 struct fcp_port *pptr;
11378 11345 struct fcp_lun *plun;
11379 11346 struct fcp_tgt *ptgt;
11380 11347 int kf;
11381 11348 fc_packet_t *fpkt;
11382 11349 fc_frame_hdr_t *hp;
11383 11350
11384 11351 pptr = ADDR2FCP(&pkt->pkt_address);
11385 11352 plun = ADDR2LUN(&pkt->pkt_address);
11386 11353 ptgt = plun->lun_tgt;
11387 11354
11388 11355 cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11389 11356 fpkt = cmd->cmd_fp_pkt;
11390 11357
11391 11358 /*
11392 11359 * this request is for dma allocation only
11393 11360 */
11394 11361 /*
11395 11362 * First step of fcp_scsi_init_pkt: pkt allocation
11396 11363 * We determine if the caller is willing to wait for the
11397 11364 * resources.
11398 11365 */
11399 11366 kf = (callback == SLEEP_FUNC) ? KM_SLEEP: KM_NOSLEEP;
11400 11367
11401 11368 /*
11402 11369 * Selective zeroing of the pkt.
11403 11370 */
11404 11371 cmd->cmd_back = NULL;
11405 11372 cmd->cmd_next = NULL;
11406 11373
11407 11374 /*
11408 11375 * Zero out fcp command
11409 11376 */
11410 11377 bzero(&cmd->cmd_fcp_cmd, sizeof (cmd->cmd_fcp_cmd));
11411 11378
11412 11379 cmd->cmd_state = FCP_PKT_IDLE;
11413 11380
11414 11381 fpkt = cmd->cmd_fp_pkt;
11415 11382 fpkt->pkt_data_acc = NULL;
11416 11383
11417 11384 /*
11418 11385 * When port_state is FCP_STATE_OFFLINE, remote_port (tgt_pd_handle)
11419 11386 	 * could be destroyed.  We need to fail pkt_setup.
11420 11387 */
11421 11388 if (pptr->port_state & FCP_STATE_OFFLINE) {
11422 11389 return (-1);
11423 11390 }
11424 11391
11425 11392 mutex_enter(&ptgt->tgt_mutex);
11426 11393 fpkt->pkt_pd = ptgt->tgt_pd_handle;
11427 11394
11428 11395 if (fc_ulp_init_packet(pptr->port_fp_handle, fpkt, kf)
11429 11396 != FC_SUCCESS) {
11430 11397 mutex_exit(&ptgt->tgt_mutex);
11431 11398 return (-1);
11432 11399 }
11433 11400
11434 11401 mutex_exit(&ptgt->tgt_mutex);
11435 11402
11436 11403 	/* Fill in the Fibre Channel frame header */
11437 11404 hp = &fpkt->pkt_cmd_fhdr;
11438 11405 hp->r_ctl = R_CTL_COMMAND;
11439 11406 hp->rsvd = 0;
11440 11407 hp->type = FC_TYPE_SCSI_FCP;
11441 11408 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
11442 11409 hp->seq_id = 0;
11443 11410 hp->df_ctl = 0;
11444 11411 hp->seq_cnt = 0;
11445 11412 hp->ox_id = 0xffff;
11446 11413 hp->rx_id = 0xffff;
11447 11414 hp->ro = 0;
11448 11415
11449 11416 /*
11450 11417 * A doubly linked list (cmd_forw, cmd_back) is built
11451 11418 * out of every allocated packet on a per-lun basis
11452 11419 *
11453 11420 * The packets are maintained in the list so as to satisfy
11454 11421 * scsi_abort() requests. At present (which is unlikely to
11455 11422 * change in the future) nobody performs a real scsi_abort
11456 11423 * in the SCSI target drivers (as they don't keep the packets
11457 11424 * after doing scsi_transport - so they don't know how to
11458 11425 * abort a packet other than sending a NULL to abort all
11459 11426 * outstanding packets)
11460 11427 */
11461 11428 mutex_enter(&plun->lun_mutex);
11462 11429 if ((cmd->cmd_forw = plun->lun_pkt_head) != NULL) {
11463 11430 plun->lun_pkt_head->cmd_back = cmd;
11464 11431 } else {
11465 11432 plun->lun_pkt_tail = cmd;
11466 11433 }
11467 11434 plun->lun_pkt_head = cmd;
11468 11435 mutex_exit(&plun->lun_mutex);
11469 11436 return (0);
11470 11437 }
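/*
 * Editorial sketch (not part of the driver source): fcp_pkt_setup() is
 * reached through the tran_setup_pkt entry registered in the port attach
 * code, i.e. when a target driver allocates a packet with
 * scsi_init_pkt(9F).  The argument values below are hypothetical:
 *
 *	pkt = scsi_init_pkt(&devp->sd_address, NULL, bp, CDB_GROUP1,
 *	    sizeof (struct scsi_arq_status), 0, 0, SLEEP_FUNC, NULL);
 *
 * SCSA allocates the packet from the kmem cache primed by
 * fcp_kmem_cache_constructor() and then calls this function to do the
 * per-allocation setup (FC frame header, per-LUN list linkage).
 */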
11471 11438
11472 11439 /*
11473 11440 * Function: fcp_pkt_teardown
11474 11441 *
11475 11442 * Description: This function releases a scsi_pkt structure and all the
11476 11443 * resources attached to it.
11477 11444 *
11478 11445 * Argument: *pkt Pointer to a scsi_pkt structure.
11479 11446 *
11480 11447 * Return Value: None
11481 11448 *
11482 11449 * Context: User, Kernel or Interrupt context.
11483 11450 */
11484 11451 static void
11485 11452 fcp_pkt_teardown(struct scsi_pkt *pkt)
11486 11453 {
11487 11454 struct fcp_port *pptr = ADDR2FCP(&pkt->pkt_address);
11488 11455 struct fcp_lun *plun = ADDR2LUN(&pkt->pkt_address);
11489 11456 struct fcp_pkt *cmd = (struct fcp_pkt *)pkt->pkt_ha_private;
11490 11457
11491 11458 /*
11492 11459 * Remove the packet from the per-lun list
11493 11460 */
11494 11461 mutex_enter(&plun->lun_mutex);
11495 11462 if (cmd->cmd_back) {
11496 11463 ASSERT(cmd != plun->lun_pkt_head);
11497 11464 cmd->cmd_back->cmd_forw = cmd->cmd_forw;
11498 11465 } else {
11499 11466 ASSERT(cmd == plun->lun_pkt_head);
11500 11467 plun->lun_pkt_head = cmd->cmd_forw;
11501 11468 }
11502 11469
11503 11470 if (cmd->cmd_forw) {
11504 11471 cmd->cmd_forw->cmd_back = cmd->cmd_back;
11505 11472 } else {
11506 11473 ASSERT(cmd == plun->lun_pkt_tail);
11507 11474 plun->lun_pkt_tail = cmd->cmd_back;
11508 11475 }
11509 11476
11510 11477 mutex_exit(&plun->lun_mutex);
11511 11478
11512 11479 (void) fc_ulp_uninit_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt);
11513 11480 }
11514 11481
11515 11482 /*
11516 11483 * Routine for reset notification setup, to register or cancel.
11517 11484 * This function is called by SCSA
11518 11485 */
11519 11486 /*ARGSUSED*/
11520 11487 static int
11521 11488 fcp_scsi_reset_notify(struct scsi_address *ap, int flag,
11522 11489 void (*callback)(caddr_t), caddr_t arg)
11523 11490 {
11524 11491 struct fcp_port *pptr = ADDR2FCP(ap);
11525 11492
11526 11493 return (scsi_hba_reset_notify_setup(ap, flag, callback, arg,
11527 11494 &pptr->port_mutex, &pptr->port_reset_notify_listf));
11528 11495 }
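
fcp_scsi_reset_notify() above only hands the request off to scsi_hba_reset_notify_setup(); the callers are SCSI target drivers going through scsi_reset_notify(9F). A minimal sketch of that consumer side (driver name, callback, and soft-state layout are hypothetical):

	#include <sys/scsi/scsi.h>

	/* Hypothetical per-instance state for a target driver. */
	struct xx_state {
		volatile int	xx_reset_pending;
	};

	/* Called by SCSA after a reset on the target this address maps to. */
	static void
	xx_reset_cb(caddr_t arg)
	{
		struct xx_state *xx = (struct xx_state *)arg;

		xx->xx_reset_pending = 1;	/* e.g. re-establish reservations */
	}

	static int
	xx_setup_reset_notify(struct scsi_device *sd, struct xx_state *xx)
	{
		/* SCSI_RESET_NOTIFY registers; SCSI_RESET_CANCEL unregisters. */
		return (scsi_reset_notify(&sd->sd_address, SCSI_RESET_NOTIFY,
		    xx_reset_cb, (caddr_t)xx));
	}
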
11529 11496
11530 11497
11531 11498 static int
11532 11499 fcp_scsi_bus_get_eventcookie(dev_info_t *dip, dev_info_t *rdip, char *name,
11533 11500 ddi_eventcookie_t *event_cookiep)
11534 11501 {
11535 11502 struct fcp_port *pptr = fcp_dip2port(dip);
11536 11503
11537 11504 if (pptr == NULL) {
11538 11505 return (DDI_FAILURE);
11539 11506 }
11540 11507
11541 11508 return (ndi_event_retrieve_cookie(pptr->port_ndi_event_hdl, rdip, name,
11542 11509 event_cookiep, NDI_EVENT_NOPASS));
11543 11510 }
11544 11511
11545 11512
11546 11513 static int
11547 11514 fcp_scsi_bus_add_eventcall(dev_info_t *dip, dev_info_t *rdip,
11548 11515 ddi_eventcookie_t eventid, void (*callback)(), void *arg,
11549 11516 ddi_callback_id_t *cb_id)
11550 11517 {
11551 11518 struct fcp_port *pptr = fcp_dip2port(dip);
11552 11519
11553 11520 if (pptr == NULL) {
11554 11521 return (DDI_FAILURE);
11555 11522 }
11556 11523
11557 11524 return (ndi_event_add_callback(pptr->port_ndi_event_hdl, rdip,
11558 11525 eventid, callback, arg, NDI_SLEEP, cb_id));
11559 11526 }
11560 11527
11561 11528
11562 11529 static int
11563 11530 fcp_scsi_bus_remove_eventcall(dev_info_t *dip, ddi_callback_id_t cb_id)
11564 11531 {
11565 11532
11566 11533 struct fcp_port *pptr = fcp_dip2port(dip);
11567 11534
11568 11535 if (pptr == NULL) {
11569 11536 return (DDI_FAILURE);
11570 11537 }
11571 11538 return (ndi_event_remove_callback(pptr->port_ndi_event_hdl, cb_id));
11572 11539 }
11573 11540
11574 11541
11575 11542 /*
11576 11543 * called by the transport to post an event
11577 11544 */
11578 11545 static int
11579 11546 fcp_scsi_bus_post_event(dev_info_t *dip, dev_info_t *rdip,
11580 11547 ddi_eventcookie_t eventid, void *impldata)
11581 11548 {
11582 11549 struct fcp_port *pptr = fcp_dip2port(dip);
11583 11550
11584 11551 if (pptr == NULL) {
11585 11552 return (DDI_FAILURE);
11586 11553 }
11587 11554
11588 11555 return (ndi_event_run_callbacks(pptr->port_ndi_event_hdl, rdip,
11589 11556 eventid, impldata));
11590 11557 }
11591 11558
11592 11559
11593 11560 /*
11594 11561  * A target in Fibre Channel in many cases has a one-to-one relation
11595 11562  * with a port identifier (also known as the D_ID, or as the AL_PA on
11596 11563  * a private loop). On Fibre Channel-to-SCSI bridge boxes a target
11597 11564  * reset will most likely reset all LUNs, which means a reset will
11598 11565  * occur on all the SCSI devices connected at the other end of the
11599 11566  * bridge. How best to handle that is still hotly debated; one can
11600 11567  * argue it at length and arrive at a solution that is best only to
11601 11568  * one's own satisfaction.
11602 11569 *
11603 11570 * To stay on track and not digress much, here are the problems stated
11604 11571 * briefly:
11605 11572 *
11606 11573  * SCSA doesn't define RESET_LUN; it defines RESET_TARGET, but the
11607 11574  * target drivers use RESET_TARGET even if their instance is on a
11608 11575  * LUN. Doesn't that sound a bit broken?
11609 11576 *
11610 11577 * FCP SCSI (the current spec) only defines RESET TARGET in the
11611 11578 * control fields of an FCP_CMND structure. It should have been
11612 11579 * fixed right there, giving flexibility to the initiators to
11613 11580 * minimize havoc that could be caused by resetting a target.
11614 11581 */
11615 11582 static int
11616 11583 fcp_reset_target(struct scsi_address *ap, int level)
11617 11584 {
11618 11585 int rval = FC_FAILURE;
11619 11586 char lun_id[25];
11620 11587 struct fcp_port *pptr = ADDR2FCP(ap);
11621 11588 struct fcp_lun *plun = ADDR2LUN(ap);
11622 11589 struct fcp_tgt *ptgt = plun->lun_tgt;
11623 11590 struct scsi_pkt *pkt;
11624 11591 struct fcp_pkt *cmd;
11625 11592 struct fcp_rsp *rsp;
11626 11593 uint32_t tgt_cnt;
11627 11594 struct fcp_rsp_info *rsp_info;
11628 11595 struct fcp_reset_elem *p;
11629 11596 int bval;
11630 11597
11631 11598 if ((p = kmem_alloc(sizeof (struct fcp_reset_elem),
11632 11599 KM_NOSLEEP)) == NULL) {
11633 11600 return (rval);
11634 11601 }
11635 11602
11636 11603 mutex_enter(&ptgt->tgt_mutex);
11637 11604 if (level == RESET_TARGET) {
11638 11605 if (ptgt->tgt_state & (FCP_TGT_OFFLINE | FCP_TGT_BUSY)) {
11639 11606 mutex_exit(&ptgt->tgt_mutex);
11640 11607 kmem_free(p, sizeof (struct fcp_reset_elem));
11641 11608 return (rval);
11642 11609 }
11643 11610 fcp_update_tgt_state(ptgt, FCP_SET, FCP_LUN_BUSY);
11644 11611 (void) strcpy(lun_id, " ");
11645 11612 } else {
11646 11613 if (plun->lun_state & (FCP_LUN_OFFLINE | FCP_LUN_BUSY)) {
11647 11614 mutex_exit(&ptgt->tgt_mutex);
11648 11615 kmem_free(p, sizeof (struct fcp_reset_elem));
11649 11616 return (rval);
11650 11617 }
11651 11618 fcp_update_lun_state(plun, FCP_SET, FCP_LUN_BUSY);
11652 11619
11653 11620 (void) sprintf(lun_id, ", LUN=%d", plun->lun_num);
11654 11621 }
11655 11622 tgt_cnt = ptgt->tgt_change_cnt;
11656 11623
11657 11624 mutex_exit(&ptgt->tgt_mutex);
11658 11625
11659 11626 if ((pkt = scsi_init_pkt(ap, NULL, NULL, 0, 0,
11660 11627 0, 0, NULL, 0)) == NULL) {
11661 11628 kmem_free(p, sizeof (struct fcp_reset_elem));
11662 11629 mutex_enter(&ptgt->tgt_mutex);
11663 11630 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11664 11631 mutex_exit(&ptgt->tgt_mutex);
11665 11632 return (rval);
11666 11633 }
11667 11634 pkt->pkt_time = FCP_POLL_TIMEOUT;
11668 11635
11669 11636 /* fill in cmd part of packet */
11670 11637 cmd = PKT2CMD(pkt);
11671 11638 if (level == RESET_TARGET) {
11672 11639 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_tgt = 1;
11673 11640 } else {
11674 11641 cmd->cmd_fcp_cmd.fcp_cntl.cntl_reset_lun = 1;
11675 11642 }
11676 11643 cmd->cmd_fp_pkt->pkt_comp = NULL;
11677 11644 cmd->cmd_pkt->pkt_flags |= FLAG_NOINTR;
11678 11645
11679 11646 /* prepare a packet for transport */
11680 11647 fcp_prepare_pkt(pptr, cmd, plun);
11681 11648
11682 11649 if (cmd->cmd_pkt->pkt_time) {
11683 11650 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
11684 11651 } else {
11685 11652 cmd->cmd_fp_pkt->pkt_timeout = 5 * 60 * 60;
11686 11653 }
11687 11654
11688 11655 (void) fc_ulp_busy_port(pptr->port_fp_handle);
11689 11656 bval = fcp_dopoll(pptr, cmd);
11690 11657 fc_ulp_idle_port(pptr->port_fp_handle);
11691 11658
11692 11659 /* submit the packet */
11693 11660 if (bval == TRAN_ACCEPT) {
11694 11661 int error = 3;
11695 11662
11696 11663 rsp = (struct fcp_rsp *)cmd->cmd_fcp_rsp;
11697 11664 rsp_info = (struct fcp_rsp_info *)(cmd->cmd_fcp_rsp +
11698 11665 sizeof (struct fcp_rsp));
11699 11666
11700 11667 if (rsp->fcp_u.fcp_status.rsp_len_set) {
11701 11668 if (fcp_validate_fcp_response(rsp, pptr) ==
11702 11669 FC_SUCCESS) {
11703 11670 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
11704 11671 FCP_CP_IN(cmd->cmd_fp_pkt->pkt_resp +
11705 11672 sizeof (struct fcp_rsp), rsp_info,
11706 11673 cmd->cmd_fp_pkt->pkt_resp_acc,
11707 11674 sizeof (struct fcp_rsp_info));
11708 11675 }
11709 11676 if (rsp_info->rsp_code == FCP_NO_FAILURE) {
11710 11677 rval = FC_SUCCESS;
11711 11678 error = 0;
11712 11679 } else {
11713 11680 error = 1;
11714 11681 }
11715 11682 } else {
11716 11683 error = 2;
11717 11684 }
11718 11685 }
11719 11686
11720 11687 switch (error) {
11721 11688 case 0:
11722 11689 fcp_log(CE_WARN, pptr->port_dip,
11723 11690 "!FCP: WWN 0x%08x%08x %s reset successfully",
11724 11691 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11725 11692 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11726 11693 break;
11727 11694
11728 11695 case 1:
11729 11696 fcp_log(CE_WARN, pptr->port_dip,
11730 11697 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11731 11698 " response code=%x",
11732 11699 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11733 11700 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11734 11701 rsp_info->rsp_code);
11735 11702 break;
11736 11703
11737 11704 case 2:
11738 11705 fcp_log(CE_WARN, pptr->port_dip,
11739 11706 "!FCP: Reset to WWN 0x%08x%08x %s failed,"
11740 11707 " Bad FCP response values: rsvd1=%x,"
11741 11708 " rsvd2=%x, sts-rsvd1=%x, sts-rsvd2=%x,"
11742 11709 " rsplen=%x, senselen=%x",
11743 11710 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11744 11711 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id,
11745 11712 rsp->reserved_0, rsp->reserved_1,
11746 11713 rsp->fcp_u.fcp_status.reserved_0,
11747 11714 rsp->fcp_u.fcp_status.reserved_1,
11748 11715 rsp->fcp_response_len, rsp->fcp_sense_len);
11749 11716 break;
11750 11717
11751 11718 default:
11752 11719 fcp_log(CE_WARN, pptr->port_dip,
11753 11720 "!FCP: Reset to WWN 0x%08x%08x %s failed",
11754 11721 *((int *)&ptgt->tgt_port_wwn.raw_wwn[0]),
11755 11722 *((int *)&ptgt->tgt_port_wwn.raw_wwn[4]), lun_id);
11756 11723 break;
11757 11724 }
11758 11725 }
11759 11726 scsi_destroy_pkt(pkt);
11760 11727
11761 11728 if (rval == FC_FAILURE) {
11762 11729 mutex_enter(&ptgt->tgt_mutex);
11763 11730 if (level == RESET_TARGET) {
11764 11731 fcp_update_tgt_state(ptgt, FCP_RESET, FCP_LUN_BUSY);
11765 11732 } else {
11766 11733 fcp_update_lun_state(plun, FCP_RESET, FCP_LUN_BUSY);
11767 11734 }
11768 11735 mutex_exit(&ptgt->tgt_mutex);
11769 11736 kmem_free(p, sizeof (struct fcp_reset_elem));
11770 11737 return (rval);
11771 11738 }
11772 11739
11773 11740 mutex_enter(&pptr->port_mutex);
11774 11741 if (level == RESET_TARGET) {
11775 11742 p->tgt = ptgt;
11776 11743 p->lun = NULL;
11777 11744 } else {
11778 11745 p->tgt = NULL;
11779 11746 p->lun = plun;
11780 11747 }
11781 11748 p->tgt = ptgt;
11782 11749 p->tgt_cnt = tgt_cnt;
11783 11750 p->timeout = fcp_watchdog_time + FCP_RESET_DELAY;
11784 11751 p->next = pptr->port_reset_list;
11785 11752 pptr->port_reset_list = p;
11786 11753
11787 11754 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11788 11755 fcp_trace, FCP_BUF_LEVEL_3, 0,
11789 11756 "Notify ssd of the reset to reinstate the reservations");
11790 11757
11791 11758 scsi_hba_reset_notify_callback(&pptr->port_mutex,
11792 11759 &pptr->port_reset_notify_listf);
11793 11760
11794 11761 mutex_exit(&pptr->port_mutex);
11795 11762
11796 11763 return (rval);
11797 11764 }
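
From a target driver's point of view, the path into fcp_reset_target() is scsi_reset(9F). Given the RESET_TARGET/RESET_LUN discussion above, a plausible (hypothetical) recovery helper would prefer the narrower LUN reset when the "lun-reset" capability has been enabled and fall back to a target reset otherwise:

	#include <sys/scsi/scsi.h>

	static int
	xx_recover_by_reset(struct scsi_address *ap)
	{
		/*
		 * "lun-reset" maps to SCSI_CAP_LUN_RESET in fcp_commoncap();
		 * it defaults to off and is enabled via scsi_ifsetcap().
		 */
		if (scsi_ifgetcap(ap, "lun-reset", 1) == 1 &&
		    scsi_reset(ap, RESET_LUN) == 1) {
			return (1);		/* only this LUN was reset */
		}

		/*
		 * RESET_TARGET resets every LUN behind the target, which on an
		 * FC-to-SCSI bridge can mean every device behind the bridge.
		 */
		return (scsi_reset(ap, RESET_TARGET));
	}
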
11798 11765
11799 11766
11800 11767 /*
11801 11768 * called by fcp_getcap and fcp_setcap to get and set (respectively)
11802 11769 * SCSI capabilities
11803 11770 */
11804 11771 /* ARGSUSED */
11805 11772 static int
11806 11773 fcp_commoncap(struct scsi_address *ap, char *cap,
11807 11774 int val, int tgtonly, int doset)
11808 11775 {
11809 11776 struct fcp_port *pptr = ADDR2FCP(ap);
11810 11777 struct fcp_lun *plun = ADDR2LUN(ap);
11811 11778 struct fcp_tgt *ptgt = plun->lun_tgt;
11812 11779 int cidx;
11813 11780 int rval = FALSE;
11814 11781
11815 11782 if (cap == (char *)0) {
11816 11783 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11817 11784 fcp_trace, FCP_BUF_LEVEL_3, 0,
11818 11785 "fcp_commoncap: invalid arg");
11819 11786 return (rval);
11820 11787 }
11821 11788
11822 11789 if ((cidx = scsi_hba_lookup_capstr(cap)) == -1) {
11823 11790 return (UNDEFINED);
11824 11791 }
11825 11792
11826 11793 /*
11827 11794 * Process setcap request.
11828 11795 */
11829 11796 if (doset) {
11830 11797 /*
11831 11798 * At present, we can only set binary (0/1) values
11832 11799 */
11833 11800 switch (cidx) {
11834 11801 case SCSI_CAP_ARQ:
11835 11802 if (val == 0) {
11836 11803 rval = FALSE;
11837 11804 } else {
11838 11805 rval = TRUE;
11839 11806 }
11840 11807 break;
11841 11808
11842 11809 case SCSI_CAP_LUN_RESET:
11843 11810 if (val) {
11844 11811 plun->lun_cap |= FCP_LUN_CAP_RESET;
11845 11812 } else {
11846 11813 plun->lun_cap &= ~FCP_LUN_CAP_RESET;
11847 11814 }
11848 11815 rval = TRUE;
11849 11816 break;
11850 11817
11851 11818 case SCSI_CAP_SECTOR_SIZE:
11852 11819 rval = TRUE;
11853 11820 break;
11854 11821 default:
11855 11822 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11856 11823 fcp_trace, FCP_BUF_LEVEL_4, 0,
11857 11824 "fcp_setcap: unsupported %d", cidx);
11858 11825 rval = UNDEFINED;
11859 11826 break;
11860 11827 }
11861 11828
11862 11829 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11863 11830 fcp_trace, FCP_BUF_LEVEL_5, 0,
11864 11831 "set cap: cap=%s, val/tgtonly/doset/rval = "
11865 11832 "0x%x/0x%x/0x%x/%d",
11866 11833 cap, val, tgtonly, doset, rval);
11867 11834
11868 11835 } else {
11869 11836 /*
11870 11837 * Process getcap request.
11871 11838 */
11872 11839 switch (cidx) {
11873 11840 case SCSI_CAP_DMA_MAX:
11874 11841 rval = (int)pptr->port_data_dma_attr.dma_attr_maxxfer;
11875 11842
11876 11843 /*
11877 11844 			 * An adjustment is needed here: qlc reports a
11878 11845 			 * 64-bit maximum transfer size while st expects an
11879 11846 			 * int, so clamp it here rather than touching the
11880 11847 			 * target drivers. That still allows a maximum single
11881 11848 			 * transfer length of 2 GB, which should last a while.
11882 11849 */
11883 11850
11884 11851 if (rval == -1) {
11885 11852 rval = MAX_INT_DMA;
11886 11853 }
11887 11854
11888 11855 break;
11889 11856
11890 11857 case SCSI_CAP_INITIATOR_ID:
11891 11858 rval = pptr->port_id;
11892 11859 break;
11893 11860
11894 11861 case SCSI_CAP_ARQ:
11895 11862 case SCSI_CAP_RESET_NOTIFICATION:
11896 11863 case SCSI_CAP_TAGGED_QING:
11897 11864 rval = TRUE;
11898 11865 break;
11899 11866
11900 11867 case SCSI_CAP_SCSI_VERSION:
11901 11868 rval = 3;
11902 11869 break;
11903 11870
11904 11871 case SCSI_CAP_INTERCONNECT_TYPE:
11905 11872 if (FC_TOP_EXTERNAL(pptr->port_topology) ||
11906 11873 (ptgt->tgt_hard_addr == 0)) {
11907 11874 rval = INTERCONNECT_FABRIC;
11908 11875 } else {
11909 11876 rval = INTERCONNECT_FIBRE;
11910 11877 }
11911 11878 break;
11912 11879
11913 11880 case SCSI_CAP_LUN_RESET:
11914 11881 rval = ((plun->lun_cap & FCP_LUN_CAP_RESET) != 0) ?
11915 11882 TRUE : FALSE;
11916 11883 break;
11917 11884
11918 11885 default:
11919 11886 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11920 11887 fcp_trace, FCP_BUF_LEVEL_4, 0,
11921 11888 "fcp_getcap: unsupported %d", cidx);
11922 11889 rval = UNDEFINED;
11923 11890 break;
11924 11891 }
11925 11892
11926 11893 FCP_TRACE(fcp_logq, pptr->port_instbuf,
11927 11894 fcp_trace, FCP_BUF_LEVEL_8, 0,
11928 11895 "get cap: cap=%s, val/tgtonly/doset/rval = "
11929 11896 "0x%x/0x%x/0x%x/%d",
11930 11897 cap, val, tgtonly, doset, rval);
11931 11898 }
11932 11899
11933 11900 return (rval);
11934 11901 }
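
fcp_commoncap() is what scsi_ifgetcap(9F)/scsi_ifsetcap(9F) ultimately land in for children of fcp. A short sketch of probing the capabilities handled above from a (hypothetical) target driver:

	#include <sys/scsi/scsi.h>
	#include <sys/cmn_err.h>

	static void
	xx_probe_caps(struct scsi_address *ap)
	{
		int	dma_max, itype;

		/* SCSI_CAP_DMA_MAX: clamped to MAX_INT_DMA when the HBA says -1 */
		dma_max = scsi_ifgetcap(ap, "dma-max", 1);

		/* SCSI_CAP_INTERCONNECT_TYPE: FABRIC vs. FIBRE, as decided above */
		itype = scsi_ifgetcap(ap, "interconnect-type", 1);

		cmn_err(CE_CONT, "?dma-max=%d interconnect-type=%d\n",
		    dma_max, itype);

		/* SCSI_CAP_LUN_RESET is one of the few settable (0/1) caps here. */
		if (scsi_ifsetcap(ap, "lun-reset", 1, 1) != 1) {
			cmn_err(CE_CONT, "?lun-reset not supported\n");
		}
	}
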
11935 11902
11936 11903 /*
11937 11904 * called by the transport to get the port-wwn and lun
11938 11905 * properties of this device, and to create a "name" based on them
11939 11906 *
11940 11907 * these properties don't exist on sun4m
11941 11908 *
11942 11909 * return 1 for success else return 0
11943 11910 */
11944 11911 /* ARGSUSED */
11945 11912 static int
11946 11913 fcp_scsi_get_name(struct scsi_device *sd, char *name, int len)
11947 11914 {
11948 11915 int i;
11949 11916 int *lun;
11950 11917 int numChars;
11951 11918 uint_t nlun;
11952 11919 uint_t count;
11953 11920 uint_t nbytes;
11954 11921 uchar_t *bytes;
11955 11922 uint16_t lun_num;
11956 11923 uint32_t tgt_id;
11957 11924 char **conf_wwn;
11958 11925 char tbuf[(FC_WWN_SIZE << 1) + 1];
11959 11926 uchar_t barray[FC_WWN_SIZE];
11960 11927 dev_info_t *tgt_dip;
11961 11928 struct fcp_tgt *ptgt;
11962 11929 struct fcp_port *pptr;
11963 11930 struct fcp_lun *plun;
11964 11931
11965 11932 ASSERT(sd != NULL);
11966 11933 ASSERT(name != NULL);
11967 11934
11968 11935 tgt_dip = sd->sd_dev;
11969 11936 pptr = ddi_get_soft_state(fcp_softstate,
11970 11937 ddi_get_instance(ddi_get_parent(tgt_dip)));
11971 11938 if (pptr == NULL) {
11972 11939 return (0);
11973 11940 }
11974 11941
11975 11942 ASSERT(tgt_dip != NULL);
11976 11943
11977 11944 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sd->sd_dev,
11978 11945 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
11979 11946 LUN_PROP, &lun, &nlun) != DDI_SUCCESS) {
11980 11947 name[0] = '\0';
11981 11948 return (0);
11982 11949 }
11983 11950
11984 11951 if (nlun == 0) {
11985 11952 ddi_prop_free(lun);
11986 11953 return (0);
11987 11954 }
11988 11955
11989 11956 lun_num = lun[0];
11990 11957 ddi_prop_free(lun);
11991 11958
11992 11959 /*
11993 11960 * Lookup for .conf WWN property
11994 11961 */
11995 11962 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, tgt_dip,
11996 11963 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, CONF_WWN_PROP,
11997 11964 &conf_wwn, &count) == DDI_PROP_SUCCESS) {
11998 11965 ASSERT(count >= 1);
11999 11966
12000 11967 fcp_ascii_to_wwn(conf_wwn[0], barray, FC_WWN_SIZE);
12001 11968 ddi_prop_free(conf_wwn);
12002 11969 mutex_enter(&pptr->port_mutex);
12003 11970 if ((plun = fcp_lookup_lun(pptr, barray, lun_num)) == NULL) {
12004 11971 mutex_exit(&pptr->port_mutex);
12005 11972 return (0);
12006 11973 }
12007 11974 ptgt = plun->lun_tgt;
12008 11975 mutex_exit(&pptr->port_mutex);
12009 11976
12010 11977 (void) ndi_prop_update_byte_array(DDI_DEV_T_NONE,
12011 11978 tgt_dip, PORT_WWN_PROP, barray, FC_WWN_SIZE);
12012 11979
12013 11980 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12014 11981 ptgt->tgt_hard_addr != 0) {
12015 11982 tgt_id = (uint32_t)fcp_alpa_to_switch[
12016 11983 ptgt->tgt_hard_addr];
12017 11984 } else {
12018 11985 tgt_id = ptgt->tgt_d_id;
12019 11986 }
12020 11987
12021 11988 (void) ndi_prop_update_int(DDI_DEV_T_NONE, tgt_dip,
12022 11989 TARGET_PROP, tgt_id);
12023 11990 }
12024 11991
12025 11992 	/* get our port-wwn property */
12026 11993 bytes = NULL;
12027 11994 if ((ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, tgt_dip,
12028 11995 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12029 11996 &nbytes) != DDI_PROP_SUCCESS) || nbytes != FC_WWN_SIZE) {
12030 11997 if (bytes != NULL) {
12031 11998 ddi_prop_free(bytes);
12032 11999 }
12033 12000 return (0);
12034 12001 }
12035 12002
12036 12003 for (i = 0; i < FC_WWN_SIZE; i++) {
12037 12004 (void) sprintf(&tbuf[i << 1], "%02x", *(bytes + i));
12038 12005 }
12039 12006
12040 12007 /* Stick in the address of the form "wWWN,LUN" */
12041 12008 numChars = snprintf(name, len, "w%s,%x", tbuf, lun_num);
12042 12009
12043 12010 ASSERT(numChars < len);
12044 12011 if (numChars >= len) {
12045 12012 fcp_log(CE_WARN, pptr->port_dip,
12046 12013 "!fcp_scsi_get_name: "
12047 12014 "name parameter length too small, it needs to be %d",
12048 12015 numChars+1);
12049 12016 }
12050 12017
12051 12018 ddi_prop_free(bytes);
12052 12019
12053 12020 return (1);
12054 12021 }
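
The string assembled above is the fcp unit address of the form "w<port-wwn>,<lun>" (for example, something like w21000024ff454321,0), which is also what fcp_create_pip() below feeds to MDI. A standalone sketch of the same formatting, with no DDI dependencies (names here are illustrative only):

	#include <stdio.h>

	#define	XX_WWN_SIZE	8	/* same as FC_WWN_SIZE */

	/* Format an 8-byte port WWN and a LUN number as "w<wwn-hex>,<lun-hex>". */
	static int
	xx_format_unit_address(const unsigned char wwn[XX_WWN_SIZE],
	    unsigned int lun, char *buf, size_t len)
	{
		char	tbuf[(XX_WWN_SIZE << 1) + 1];
		int	i;

		for (i = 0; i < XX_WWN_SIZE; i++) {
			(void) sprintf(&tbuf[i << 1], "%02x", wwn[i]);
		}
		return (snprintf(buf, len, "w%s,%x", tbuf, lun));
	}
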
12055 12022
12056 12023
12057 12024 /*
12058 12025 * called by the transport to get the SCSI target id value, returning
12059 12026 * it in "name"
12060 12027 *
12061 12028 * this isn't needed/used on sun4m
12062 12029 *
12063 12030 * return 1 for success else return 0
12064 12031 */
12065 12032 /* ARGSUSED */
12066 12033 static int
12067 12034 fcp_scsi_get_bus_addr(struct scsi_device *sd, char *name, int len)
12068 12035 {
12069 12036 struct fcp_lun *plun = ADDR2LUN(&sd->sd_address);
12070 12037 struct fcp_tgt *ptgt;
12071 12038 int numChars;
12072 12039
12073 12040 if (plun == NULL) {
12074 12041 return (0);
12075 12042 }
12076 12043
12077 12044 if ((ptgt = plun->lun_tgt) == NULL) {
12078 12045 return (0);
12079 12046 }
12080 12047
12081 12048 numChars = snprintf(name, len, "%x", ptgt->tgt_d_id);
12082 12049
12083 12050 ASSERT(numChars < len);
12084 12051 if (numChars >= len) {
12085 12052 fcp_log(CE_WARN, NULL,
12086 12053 "!fcp_scsi_get_bus_addr: "
12087 12054 "name parameter length too small, it needs to be %d",
12088 12055 numChars+1);
12089 12056 }
12090 12057
12091 12058 return (1);
12092 12059 }
12093 12060
12094 12061
12095 12062 /*
12096 12063 * called internally to reset the link where the specified port lives
12097 12064 */
12098 12065 static int
12099 12066 fcp_linkreset(struct fcp_port *pptr, struct scsi_address *ap, int sleep)
12100 12067 {
12101 12068 la_wwn_t wwn;
12102 12069 struct fcp_lun *plun;
12103 12070 struct fcp_tgt *ptgt;
12104 12071
12105 12072 /* disable restart of lip if we're suspended */
12106 12073 mutex_enter(&pptr->port_mutex);
12107 12074
12108 12075 if (pptr->port_state & (FCP_STATE_SUSPENDED |
12109 12076 FCP_STATE_POWER_DOWN)) {
12110 12077 mutex_exit(&pptr->port_mutex);
12111 12078 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12112 12079 fcp_trace, FCP_BUF_LEVEL_2, 0,
12113 12080 "fcp_linkreset, fcp%d: link reset "
12114 12081 "disabled due to DDI_SUSPEND",
12115 12082 ddi_get_instance(pptr->port_dip));
12116 12083 return (FC_FAILURE);
12117 12084 }
12118 12085
12119 12086 if (pptr->port_state & (FCP_STATE_OFFLINE | FCP_STATE_ONLINING)) {
12120 12087 mutex_exit(&pptr->port_mutex);
12121 12088 return (FC_SUCCESS);
12122 12089 }
12123 12090
12124 12091 FCP_DTRACE(fcp_logq, pptr->port_instbuf,
12125 12092 fcp_trace, FCP_BUF_LEVEL_8, 0, "Forcing link reset");
12126 12093
12127 12094 /*
12128 12095 * If ap == NULL assume local link reset.
12129 12096 */
12130 12097 if (FC_TOP_EXTERNAL(pptr->port_topology) && (ap != NULL)) {
12131 12098 plun = ADDR2LUN(ap);
12132 12099 ptgt = plun->lun_tgt;
12133 12100 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &wwn, sizeof (wwn));
12134 12101 } else {
12135 12102 bzero((caddr_t)&wwn, sizeof (wwn));
12136 12103 }
12137 12104 mutex_exit(&pptr->port_mutex);
12138 12105
12139 12106 return (fc_ulp_linkreset(pptr->port_fp_handle, &wwn, sleep));
12140 12107 }
12141 12108
12142 12109
12143 12110 /*
12144 12111 * called from fcp_port_attach() to resume a port
12145 12112 * return DDI_* success/failure status
12146 12113 * acquires and releases the global mutex
12147 12114 * acquires and releases the port mutex
12148 12115 */
12149 12116 /*ARGSUSED*/
12150 12117
12151 12118 static int
12152 12119 fcp_handle_port_resume(opaque_t ulph, fc_ulp_port_info_t *pinfo,
12153 12120 uint32_t s_id, fc_attach_cmd_t cmd, int instance)
12154 12121 {
12155 12122 int res = DDI_FAILURE; /* default result */
12156 12123 struct fcp_port *pptr; /* port state ptr */
12157 12124 uint32_t alloc_cnt;
12158 12125 uint32_t max_cnt;
12159 12126 fc_portmap_t *tmp_list = NULL;
12160 12127
12161 12128 FCP_DTRACE(fcp_logq, "fcp", fcp_trace,
12162 12129 FCP_BUF_LEVEL_8, 0, "port resume: for port %d",
12163 12130 instance);
12164 12131
12165 12132 if ((pptr = ddi_get_soft_state(fcp_softstate, instance)) == NULL) {
12166 12133 cmn_err(CE_WARN, "fcp: bad soft state");
12167 12134 return (res);
12168 12135 }
12169 12136
12170 12137 mutex_enter(&pptr->port_mutex);
12171 12138 switch (cmd) {
12172 12139 case FC_CMD_RESUME:
12173 12140 ASSERT((pptr->port_state & FCP_STATE_POWER_DOWN) == 0);
12174 12141 pptr->port_state &= ~FCP_STATE_SUSPENDED;
12175 12142 break;
12176 12143
12177 12144 case FC_CMD_POWER_UP:
12178 12145 /*
12179 12146 * If the port is DDI_SUSPENded, defer rediscovery
12180 12147 * until DDI_RESUME occurs
12181 12148 */
12182 12149 if (pptr->port_state & FCP_STATE_SUSPENDED) {
12183 12150 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12184 12151 mutex_exit(&pptr->port_mutex);
12185 12152 return (DDI_SUCCESS);
12186 12153 }
12187 12154 pptr->port_state &= ~FCP_STATE_POWER_DOWN;
12188 12155 }
12189 12156 pptr->port_id = s_id;
12190 12157 pptr->port_state = FCP_STATE_INIT;
12191 12158 mutex_exit(&pptr->port_mutex);
12192 12159
12193 12160 /*
12194 12161 * Make a copy of ulp_port_info as fctl allocates
12195 12162 * a temp struct.
12196 12163 */
12197 12164 (void) fcp_cp_pinfo(pptr, pinfo);
12198 12165
12199 12166 mutex_enter(&fcp_global_mutex);
12200 12167 if (fcp_watchdog_init++ == 0) {
12201 12168 fcp_watchdog_tick = fcp_watchdog_timeout *
12202 12169 drv_usectohz(1000000);
12203 12170 fcp_watchdog_id = timeout(fcp_watch,
12204 12171 NULL, fcp_watchdog_tick);
12205 12172 }
12206 12173 mutex_exit(&fcp_global_mutex);
12207 12174
12208 12175 /*
12209 12176 * Handle various topologies and link states.
12210 12177 */
12211 12178 switch (FC_PORT_STATE_MASK(pptr->port_phys_state)) {
12212 12179 case FC_STATE_OFFLINE:
12213 12180 /*
12214 12181 * Wait for ONLINE, at which time a state
12215 12182 * change will cause a statec_callback
12216 12183 */
12217 12184 res = DDI_SUCCESS;
12218 12185 break;
12219 12186
12220 12187 case FC_STATE_ONLINE:
12221 12188
12222 12189 if (pptr->port_topology == FC_TOP_UNKNOWN) {
12223 12190 (void) fcp_linkreset(pptr, NULL, KM_NOSLEEP);
12224 12191 res = DDI_SUCCESS;
12225 12192 break;
12226 12193 }
12227 12194
12228 12195 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
12229 12196 !fcp_enable_auto_configuration) {
12230 12197 tmp_list = fcp_construct_map(pptr, &alloc_cnt);
12231 12198 if (tmp_list == NULL) {
12232 12199 if (!alloc_cnt) {
12233 12200 res = DDI_SUCCESS;
12234 12201 }
12235 12202 break;
12236 12203 }
12237 12204 max_cnt = alloc_cnt;
12238 12205 } else {
12239 12206 ASSERT(pptr->port_topology != FC_TOP_UNKNOWN);
12240 12207
12241 12208 alloc_cnt = FCP_MAX_DEVICES;
12242 12209
12243 12210 if ((tmp_list = (fc_portmap_t *)kmem_zalloc(
12244 12211 (sizeof (fc_portmap_t)) * alloc_cnt,
12245 12212 KM_NOSLEEP)) == NULL) {
12246 12213 fcp_log(CE_WARN, pptr->port_dip,
12247 12214 "!fcp%d: failed to allocate portmap",
12248 12215 instance);
12249 12216 break;
12250 12217 }
12251 12218
12252 12219 max_cnt = alloc_cnt;
12253 12220 if ((res = fc_ulp_getportmap(pptr->port_fp_handle,
12254 12221 &tmp_list, &max_cnt, FC_ULP_PLOGI_PRESERVE)) !=
12255 12222 FC_SUCCESS) {
12256 12223 caddr_t msg;
12257 12224
12258 12225 (void) fc_ulp_error(res, &msg);
12259 12226
12260 12227 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12261 12228 fcp_trace, FCP_BUF_LEVEL_2, 0,
12262 12229 "resume failed getportmap: reason=0x%x",
12263 12230 res);
12264 12231
12265 12232 fcp_log(CE_WARN, pptr->port_dip,
12266 12233 "!failed to get port map : %s", msg);
12267 12234 break;
12268 12235 }
12269 12236 if (max_cnt > alloc_cnt) {
12270 12237 alloc_cnt = max_cnt;
12271 12238 }
12272 12239 }
12273 12240
12274 12241 /*
12275 12242 * do the SCSI device discovery and create
12276 12243 * the devinfos
12277 12244 */
12278 12245 fcp_statec_callback(ulph, pptr->port_fp_handle,
12279 12246 pptr->port_phys_state, pptr->port_topology, tmp_list,
12280 12247 max_cnt, pptr->port_id);
12281 12248
12282 12249 res = DDI_SUCCESS;
12283 12250 break;
12284 12251
12285 12252 default:
12286 12253 fcp_log(CE_WARN, pptr->port_dip,
12287 12254 "!fcp%d: invalid port state at attach=0x%x",
12288 12255 instance, pptr->port_phys_state);
12289 12256
12290 12257 mutex_enter(&pptr->port_mutex);
12291 12258 pptr->port_phys_state = FCP_STATE_OFFLINE;
12292 12259 mutex_exit(&pptr->port_mutex);
12293 12260 res = DDI_SUCCESS;
12294 12261
12295 12262 break;
12296 12263 }
12297 12264
12298 12265 if (tmp_list != NULL) {
12299 12266 kmem_free(tmp_list, sizeof (fc_portmap_t) * alloc_cnt);
12300 12267 }
12301 12268
12302 12269 return (res);
12303 12270 }
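
The watchdog arming in the resume path above follows the usual timeout(9F) pattern: convert a seconds-based tunable to clock ticks with drv_usectohz(9F) and schedule the handler, which then re-arms itself. A minimal sketch of that pattern (names are hypothetical, not fcp's own):

	#include <sys/ddi.h>
	#include <sys/sunddi.h>

	static timeout_id_t	xx_tid;
	static clock_t		xx_tick;
	static int		xx_timeout_secs = 10;	/* hypothetical tunable */

	static void
	xx_watch(void *arg)
	{
		/* ... periodic housekeeping ... */

		/* Re-arm so the handler keeps firing every xx_timeout_secs. */
		xx_tid = timeout(xx_watch, arg, xx_tick);
	}

	static void
	xx_start_watchdog(void)
	{
		/* Seconds -> microseconds -> clock ticks. */
		xx_tick = xx_timeout_secs * drv_usectohz(1000000);
		xx_tid = timeout(xx_watch, NULL, xx_tick);
	}
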
12304 12271
12305 12272
12306 12273 static void
12307 12274 fcp_cp_pinfo(struct fcp_port *pptr, fc_ulp_port_info_t *pinfo)
12308 12275 {
12309 12276 pptr->port_fp_modlinkage = *pinfo->port_linkage;
12310 12277 pptr->port_dip = pinfo->port_dip;
12311 12278 pptr->port_fp_handle = pinfo->port_handle;
12312 12279 if (pinfo->port_acc_attr != NULL) {
12313 12280 /*
12314 12281 * FCA supports DMA
12315 12282 */
12316 12283 pptr->port_data_dma_attr = *pinfo->port_data_dma_attr;
12317 12284 pptr->port_cmd_dma_attr = *pinfo->port_cmd_dma_attr;
12318 12285 pptr->port_resp_dma_attr = *pinfo->port_resp_dma_attr;
12319 12286 pptr->port_dma_acc_attr = *pinfo->port_acc_attr;
12320 12287 }
12321 12288 pptr->port_priv_pkt_len = pinfo->port_fca_pkt_size;
12322 12289 pptr->port_max_exch = pinfo->port_fca_max_exch;
12323 12290 pptr->port_phys_state = pinfo->port_state;
12324 12291 pptr->port_topology = pinfo->port_flags;
12325 12292 pptr->port_reset_action = pinfo->port_reset_action;
12326 12293 pptr->port_cmds_dma_flags = pinfo->port_dma_behavior;
12327 12294 pptr->port_fcp_dma = pinfo->port_fcp_dma;
12328 12295 bcopy(&pinfo->port_nwwn, &pptr->port_nwwn, sizeof (la_wwn_t));
12329 12296 bcopy(&pinfo->port_pwwn, &pptr->port_pwwn, sizeof (la_wwn_t));
12330 12297
12331 12298 /* Clear FMA caps to avoid fm-capability ereport */
12332 12299 if (pptr->port_cmd_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12333 12300 pptr->port_cmd_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12334 12301 if (pptr->port_data_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12335 12302 pptr->port_data_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12336 12303 if (pptr->port_resp_dma_attr.dma_attr_flags & DDI_DMA_FLAGERR)
12337 12304 pptr->port_resp_dma_attr.dma_attr_flags &= ~DDI_DMA_FLAGERR;
12338 12305 }
12339 12306
12340 12307 /*
12341 12308  * If the element's wait field is set to 1, another thread is
12342 12309  * waiting for the operation to complete. Once it is complete,
12343 12310  * the waiting thread is signaled and the element is freed by
12344 12311  * the waiting thread. If the element's wait field is set to 0,
12345 12312  * the element is freed here.
12346 12313 */
12347 12314 static void
12348 12315 fcp_process_elem(struct fcp_hp_elem *elem, int result)
12349 12316 {
12350 12317 ASSERT(elem != NULL);
12351 12318 mutex_enter(&elem->mutex);
12352 12319 elem->result = result;
12353 12320 if (elem->wait) {
12354 12321 elem->wait = 0;
12355 12322 cv_signal(&elem->cv);
12356 12323 mutex_exit(&elem->mutex);
12357 12324 } else {
12358 12325 mutex_exit(&elem->mutex);
12359 12326 cv_destroy(&elem->cv);
12360 12327 mutex_destroy(&elem->mutex);
12361 12328 kmem_free(elem, sizeof (struct fcp_hp_elem));
12362 12329 }
12363 12330 }
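
The comment above only describes the worker side of the handshake. For clarity, here is a sketch of what the waiting thread is expected to do with an element whose wait field it set to 1 (this mirrors the convention described in the comment; it is not a copy of fcp's actual pass-to-hotplug code):

	static int
	xx_wait_for_elem(struct fcp_hp_elem *elem)
	{
		int	result;

		mutex_enter(&elem->mutex);
		while (elem->wait) {
			/* fcp_process_elem() clears wait and signals the cv. */
			cv_wait(&elem->cv, &elem->mutex);
		}
		result = elem->result;
		mutex_exit(&elem->mutex);

		/* Since wait was set, the waiter owns the cleanup. */
		cv_destroy(&elem->cv);
		mutex_destroy(&elem->mutex);
		kmem_free(elem, sizeof (struct fcp_hp_elem));

		return (result);
	}
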
12364 12331
12365 12332 /*
12366 12333 * This function is invoked from the taskq thread to allocate
12367 12334 * devinfo nodes and to online/offline them.
12368 12335 */
12369 12336 static void
12370 12337 fcp_hp_task(void *arg)
12371 12338 {
12372 12339 struct fcp_hp_elem *elem = (struct fcp_hp_elem *)arg;
12373 12340 struct fcp_lun *plun = elem->lun;
12374 12341 struct fcp_port *pptr = elem->port;
12375 12342 int result;
12376 12343
12377 12344 ASSERT(elem->what == FCP_ONLINE ||
12378 12345 elem->what == FCP_OFFLINE ||
12379 12346 elem->what == FCP_MPXIO_PATH_CLEAR_BUSY ||
12380 12347 elem->what == FCP_MPXIO_PATH_SET_BUSY);
12381 12348
12382 12349 mutex_enter(&pptr->port_mutex);
12383 12350 mutex_enter(&plun->lun_mutex);
12384 12351 if (((elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) &&
12385 12352 plun->lun_event_count != elem->event_cnt) ||
12386 12353 pptr->port_state & (FCP_STATE_SUSPENDED |
12387 12354 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
12388 12355 mutex_exit(&plun->lun_mutex);
12389 12356 mutex_exit(&pptr->port_mutex);
12390 12357 fcp_process_elem(elem, NDI_FAILURE);
12391 12358 return;
12392 12359 }
12393 12360 mutex_exit(&plun->lun_mutex);
12394 12361 mutex_exit(&pptr->port_mutex);
12395 12362
12396 12363 result = fcp_trigger_lun(plun, elem->cip, elem->old_lun_mpxio,
12397 12364 elem->what, elem->link_cnt, elem->tgt_cnt, elem->flags);
12398 12365 fcp_process_elem(elem, result);
12399 12366 }
12400 12367
12401 12368
12402 12369 static child_info_t *
12403 12370 fcp_get_cip(struct fcp_lun *plun, child_info_t *cip, int lcount,
12404 12371 int tcount)
12405 12372 {
12406 12373 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12407 12374
12408 12375 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
12409 12376 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
12410 12377
12411 12378 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12412 12379 /*
12413 12380 * Child has not been created yet. Create the child device
12414 12381 * based on the per-Lun flags.
12415 12382 */
12416 12383 if (pptr->port_mpxio == 0 || plun->lun_mpxio == 0) {
12417 12384 plun->lun_cip =
12418 12385 CIP(fcp_create_dip(plun, lcount, tcount));
12419 12386 plun->lun_mpxio = 0;
12420 12387 } else {
12421 12388 plun->lun_cip =
12422 12389 CIP(fcp_create_pip(plun, lcount, tcount));
12423 12390 plun->lun_mpxio = 1;
12424 12391 }
12425 12392 } else {
12426 12393 plun->lun_cip = cip;
12427 12394 }
12428 12395
12429 12396 return (plun->lun_cip);
12430 12397 }
12431 12398
12432 12399
12433 12400 static int
12434 12401 fcp_is_dip_present(struct fcp_lun *plun, dev_info_t *cdip)
12435 12402 {
12436 12403 int rval = FC_FAILURE;
12437 12404 dev_info_t *pdip;
12438 12405 struct dev_info *dip;
12439 12406 int circular;
12440 12407
12441 12408 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12442 12409
12443 12410 pdip = plun->lun_tgt->tgt_port->port_dip;
12444 12411
12445 12412 if (plun->lun_cip == NULL) {
12446 12413 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
12447 12414 fcp_trace, FCP_BUF_LEVEL_3, 0,
12448 12415 "fcp_is_dip_present: plun->lun_cip is NULL: "
12449 12416 "plun: %p lun state: %x num: %d target state: %x",
12450 12417 plun, plun->lun_state, plun->lun_num,
12451 12418 plun->lun_tgt->tgt_port->port_state);
12452 12419 return (rval);
12453 12420 }
12454 12421 ndi_devi_enter(pdip, &circular);
12455 12422 dip = DEVI(pdip)->devi_child;
12456 12423 while (dip) {
12457 12424 if (dip == DEVI(cdip)) {
12458 12425 rval = FC_SUCCESS;
12459 12426 break;
12460 12427 }
12461 12428 dip = dip->devi_sibling;
12462 12429 }
12463 12430 ndi_devi_exit(pdip, circular);
12464 12431 return (rval);
12465 12432 }
12466 12433
12467 12434 static int
12468 12435 fcp_is_child_present(struct fcp_lun *plun, child_info_t *cip)
12469 12436 {
12470 12437 int rval = FC_FAILURE;
12471 12438
12472 12439 ASSERT(plun != NULL);
12473 12440 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12474 12441
12475 12442 if (plun->lun_mpxio == 0) {
12476 12443 rval = fcp_is_dip_present(plun, DIP(cip));
12477 12444 } else {
12478 12445 rval = fcp_is_pip_present(plun, PIP(cip));
12479 12446 }
12480 12447
12481 12448 return (rval);
12482 12449 }
12483 12450
12484 12451 /*
12485 12452 * Function: fcp_create_dip
12486 12453 *
12487 12454 * Description: Creates a dev_info_t structure for the LUN specified by the
12488 12455 * caller.
12489 12456 *
12490 12457 * Argument: plun Lun structure
12491 12458 * link_cnt Link state count.
12492 12459 * tgt_cnt Target state change count.
12493 12460 *
12494 12461 * Return Value: NULL if it failed
12495 12462 * dev_info_t structure address if it succeeded
12496 12463 *
12497 12464 * Context: Kernel context
12498 12465 */
12499 12466 static dev_info_t *
12500 12467 fcp_create_dip(struct fcp_lun *plun, int link_cnt, int tgt_cnt)
12501 12468 {
12502 12469 int failure = 0;
12503 12470 uint32_t tgt_id;
12504 12471 uint64_t sam_lun;
12505 12472 struct fcp_tgt *ptgt = plun->lun_tgt;
12506 12473 struct fcp_port *pptr = ptgt->tgt_port;
12507 12474 dev_info_t *pdip = pptr->port_dip;
12508 12475 dev_info_t *cdip = NULL;
12509 12476 dev_info_t *old_dip = DIP(plun->lun_cip);
12510 12477 char *nname = NULL;
12511 12478 char **compatible = NULL;
12512 12479 int ncompatible;
12513 12480 char *scsi_binding_set;
12514 12481 char t_pwwn[17];
12515 12482
12516 12483 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12517 12484 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12518 12485
12519 12486 /* get the 'scsi-binding-set' property */
12520 12487 if (ddi_prop_lookup_string(DDI_DEV_T_ANY, pdip,
12521 12488 DDI_PROP_NOTPROM | DDI_PROP_DONTPASS, "scsi-binding-set",
12522 12489 &scsi_binding_set) != DDI_PROP_SUCCESS) {
12523 12490 scsi_binding_set = NULL;
12524 12491 }
12525 12492
12526 12493 /* determine the node name and compatible */
12527 12494 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12528 12495 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12529 12496 if (scsi_binding_set) {
12530 12497 ddi_prop_free(scsi_binding_set);
12531 12498 }
12532 12499
12533 12500 if (nname == NULL) {
12534 12501 #ifdef DEBUG
12535 12502 cmn_err(CE_WARN, "%s%d: no driver for "
12536 12503 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12537 12504 " compatible: %s",
12538 12505 ddi_driver_name(pdip), ddi_get_instance(pdip),
12539 12506 ptgt->tgt_port_wwn.raw_wwn[0],
12540 12507 ptgt->tgt_port_wwn.raw_wwn[1],
12541 12508 ptgt->tgt_port_wwn.raw_wwn[2],
12542 12509 ptgt->tgt_port_wwn.raw_wwn[3],
12543 12510 ptgt->tgt_port_wwn.raw_wwn[4],
12544 12511 ptgt->tgt_port_wwn.raw_wwn[5],
12545 12512 ptgt->tgt_port_wwn.raw_wwn[6],
12546 12513 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12547 12514 *compatible);
12548 12515 #endif /* DEBUG */
12549 12516 failure++;
12550 12517 goto end_of_fcp_create_dip;
12551 12518 }
12552 12519
12553 12520 cdip = fcp_find_existing_dip(plun, pdip, nname);
12554 12521
12555 12522 /*
12556 12523 	 * If the old_dip does not match the cdip, some property has
12557 12524 	 * changed. Since we'll be using the cdip, we need to offline
12558 12525 	 * the old_dip. If the state contains FCP_LUN_CHANGED, the dtype
12559 12526 	 * for the device has been updated; offline the old device and
12560 12527 	 * create a new device with the new device type.
12561 12528 	 * Refer to bug: 4764752
12562 12529 */
12563 12530 if (old_dip && (cdip != old_dip ||
12564 12531 plun->lun_state & FCP_LUN_CHANGED)) {
12565 12532 plun->lun_state &= ~(FCP_LUN_INIT);
12566 12533 mutex_exit(&plun->lun_mutex);
12567 12534 mutex_exit(&pptr->port_mutex);
12568 12535
12569 12536 mutex_enter(&ptgt->tgt_mutex);
12570 12537 (void) fcp_pass_to_hp(pptr, plun, CIP(old_dip), FCP_OFFLINE,
12571 12538 link_cnt, tgt_cnt, NDI_DEVI_REMOVE, 0);
12572 12539 mutex_exit(&ptgt->tgt_mutex);
12573 12540
12574 12541 #ifdef DEBUG
12575 12542 if (cdip != NULL) {
12576 12543 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12577 12544 fcp_trace, FCP_BUF_LEVEL_2, 0,
12578 12545 "Old dip=%p; New dip=%p don't match", old_dip,
12579 12546 cdip);
12580 12547 } else {
12581 12548 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12582 12549 fcp_trace, FCP_BUF_LEVEL_2, 0,
12583 12550 "Old dip=%p; New dip=NULL don't match", old_dip);
12584 12551 }
12585 12552 #endif
12586 12553
12587 12554 mutex_enter(&pptr->port_mutex);
12588 12555 mutex_enter(&plun->lun_mutex);
12589 12556 }
12590 12557
12591 12558 if (cdip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12592 12559 plun->lun_state &= ~(FCP_LUN_CHANGED);
12593 12560 if (ndi_devi_alloc(pptr->port_dip, nname,
12594 12561 DEVI_SID_NODEID, &cdip) != NDI_SUCCESS) {
12595 12562 failure++;
12596 12563 goto end_of_fcp_create_dip;
12597 12564 }
12598 12565 }
12599 12566
12600 12567 /*
12601 12568 * Previously all the properties for the devinfo were destroyed here
12602 12569 * with a call to ndi_prop_remove_all(). Since this may cause loss of
12603 12570 * the devid property (and other properties established by the target
12604 12571 * driver or framework) which the code does not always recreate, this
12605 12572 * call was removed.
12606 12573 * This opens a theoretical possibility that we may return with a
12607 12574 * stale devid on the node if the scsi entity behind the fibre channel
12608 12575 * lun has changed.
12609 12576 */
12610 12577
12611 12578 /* decorate the node with compatible */
12612 12579 if (ndi_prop_update_string_array(DDI_DEV_T_NONE, cdip,
12613 12580 "compatible", compatible, ncompatible) != DDI_PROP_SUCCESS) {
12614 12581 failure++;
12615 12582 goto end_of_fcp_create_dip;
12616 12583 }
12617 12584
12618 12585 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, NODE_WWN_PROP,
12619 12586 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12620 12587 failure++;
12621 12588 goto end_of_fcp_create_dip;
12622 12589 }
12623 12590
12624 12591 if (ndi_prop_update_byte_array(DDI_DEV_T_NONE, cdip, PORT_WWN_PROP,
12625 12592 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE) != DDI_PROP_SUCCESS) {
12626 12593 failure++;
12627 12594 goto end_of_fcp_create_dip;
12628 12595 }
12629 12596
12630 12597 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12631 12598 t_pwwn[16] = '\0';
12632 12599 if (ndi_prop_update_string(DDI_DEV_T_NONE, cdip, TGT_PORT_PROP, t_pwwn)
12633 12600 != DDI_PROP_SUCCESS) {
12634 12601 failure++;
12635 12602 goto end_of_fcp_create_dip;
12636 12603 }
12637 12604
12638 12605 /*
12639 12606 * If there is no hard address - We might have to deal with
12640 12607 * that by using WWN - Having said that it is important to
12641 12608 * recognize this problem early so ssd can be informed of
12642 12609 * the right interconnect type.
12643 12610 */
12644 12611 if (!FC_TOP_EXTERNAL(pptr->port_topology) && ptgt->tgt_hard_addr != 0) {
12645 12612 tgt_id = (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12646 12613 } else {
12647 12614 tgt_id = ptgt->tgt_d_id;
12648 12615 }
12649 12616
12650 12617 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, TARGET_PROP,
12651 12618 tgt_id) != DDI_PROP_SUCCESS) {
12652 12619 failure++;
12653 12620 goto end_of_fcp_create_dip;
12654 12621 }
12655 12622
12656 12623 if (ndi_prop_update_int(DDI_DEV_T_NONE, cdip, LUN_PROP,
12657 12624 (int)plun->lun_num) != DDI_PROP_SUCCESS) {
12658 12625 failure++;
12659 12626 goto end_of_fcp_create_dip;
12660 12627 }
12661 12628 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12662 12629 if (ndi_prop_update_int64(DDI_DEV_T_NONE, cdip, SAM_LUN_PROP,
12663 12630 sam_lun) != DDI_PROP_SUCCESS) {
12664 12631 failure++;
12665 12632 goto end_of_fcp_create_dip;
12666 12633 }
12667 12634
12668 12635 end_of_fcp_create_dip:
12669 12636 scsi_hba_nodename_compatible_free(nname, compatible);
12670 12637
12671 12638 if (cdip != NULL && failure) {
12672 12639 (void) ndi_prop_remove_all(cdip);
12673 12640 (void) ndi_devi_free(cdip);
12674 12641 cdip = NULL;
12675 12642 }
12676 12643
12677 12644 return (cdip);
12678 12645 }
12679 12646
12680 12647 /*
12681 12648 * Function: fcp_create_pip
12682 12649 *
12683 12650 * Description: Creates a Path Id for the LUN specified by the caller.
12684 12651 *
12685 12652 * Argument: plun Lun structure
12686 12653 * link_cnt Link state count.
12687 12654 * tgt_cnt Target state count.
12688 12655 *
12689 12656 * Return Value: NULL if it failed
12690 12657 * mdi_pathinfo_t structure address if it succeeded
12691 12658 *
12692 12659 * Context: Kernel context
12693 12660 */
12694 12661 static mdi_pathinfo_t *
12695 12662 fcp_create_pip(struct fcp_lun *plun, int lcount, int tcount)
12696 12663 {
12697 12664 int i;
12698 12665 char buf[MAXNAMELEN];
12699 12666 char uaddr[MAXNAMELEN];
12700 12667 int failure = 0;
12701 12668 uint32_t tgt_id;
12702 12669 uint64_t sam_lun;
12703 12670 struct fcp_tgt *ptgt = plun->lun_tgt;
12704 12671 struct fcp_port *pptr = ptgt->tgt_port;
12705 12672 dev_info_t *pdip = pptr->port_dip;
12706 12673 mdi_pathinfo_t *pip = NULL;
12707 12674 mdi_pathinfo_t *old_pip = PIP(plun->lun_cip);
12708 12675 char *nname = NULL;
12709 12676 char **compatible = NULL;
12710 12677 int ncompatible;
12711 12678 char *scsi_binding_set;
12712 12679 char t_pwwn[17];
12713 12680
12714 12681 ASSERT(MUTEX_HELD(&plun->lun_mutex));
12715 12682 ASSERT(MUTEX_HELD(&pptr->port_mutex));
12716 12683
12717 12684 scsi_binding_set = "vhci";
12718 12685
12719 12686 /* determine the node name and compatible */
12720 12687 scsi_hba_nodename_compatible_get(&plun->lun_inq, scsi_binding_set,
12721 12688 plun->lun_inq.inq_dtype, NULL, &nname, &compatible, &ncompatible);
12722 12689
12723 12690 if (nname == NULL) {
12724 12691 #ifdef DEBUG
12725 12692 		cmn_err(CE_WARN, "fcp_create_pip: %s%d: no driver for "
12726 12693 "device @w%02x%02x%02x%02x%02x%02x%02x%02x,%d:"
12727 12694 " compatible: %s",
12728 12695 ddi_driver_name(pdip), ddi_get_instance(pdip),
12729 12696 ptgt->tgt_port_wwn.raw_wwn[0],
12730 12697 ptgt->tgt_port_wwn.raw_wwn[1],
12731 12698 ptgt->tgt_port_wwn.raw_wwn[2],
12732 12699 ptgt->tgt_port_wwn.raw_wwn[3],
12733 12700 ptgt->tgt_port_wwn.raw_wwn[4],
12734 12701 ptgt->tgt_port_wwn.raw_wwn[5],
12735 12702 ptgt->tgt_port_wwn.raw_wwn[6],
12736 12703 ptgt->tgt_port_wwn.raw_wwn[7], plun->lun_num,
12737 12704 *compatible);
12738 12705 #endif /* DEBUG */
12739 12706 failure++;
12740 12707 goto end_of_fcp_create_pip;
12741 12708 }
12742 12709
12743 12710 pip = fcp_find_existing_pip(plun, pdip);
12744 12711
12745 12712 /*
12746 12713 	 * If the old_pip does not match the pip, some property has
12747 12714 	 * changed. Since we'll be using the pip, we need to offline
12748 12715 	 * the old_pip. If the state contains FCP_LUN_CHANGED, the dtype
12749 12716 	 * for the device has been updated; offline the old device and
12750 12717 	 * create a new device with the new device type.
12751 12718 	 * Refer to bug: 4764752
12752 12719 */
12753 12720 if (old_pip && (pip != old_pip ||
12754 12721 plun->lun_state & FCP_LUN_CHANGED)) {
12755 12722 plun->lun_state &= ~(FCP_LUN_INIT);
12756 12723 mutex_exit(&plun->lun_mutex);
12757 12724 mutex_exit(&pptr->port_mutex);
12758 12725
12759 12726 mutex_enter(&ptgt->tgt_mutex);
12760 12727 (void) fcp_pass_to_hp(pptr, plun, CIP(old_pip),
12761 12728 FCP_OFFLINE, lcount, tcount,
12762 12729 NDI_DEVI_REMOVE, 0);
12763 12730 mutex_exit(&ptgt->tgt_mutex);
12764 12731
12765 12732 if (pip != NULL) {
12766 12733 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12767 12734 fcp_trace, FCP_BUF_LEVEL_2, 0,
12768 12735 "Old pip=%p; New pip=%p don't match",
12769 12736 old_pip, pip);
12770 12737 } else {
12771 12738 FCP_TRACE(fcp_logq, pptr->port_instbuf,
12772 12739 fcp_trace, FCP_BUF_LEVEL_2, 0,
12773 12740 "Old pip=%p; New pip=NULL don't match",
12774 12741 old_pip);
12775 12742 }
12776 12743
12777 12744 mutex_enter(&pptr->port_mutex);
12778 12745 mutex_enter(&plun->lun_mutex);
12779 12746 }
12780 12747
12781 12748 /*
12782 12749 	 * Since FC_WWN_SIZE is 8 bytes and, unlike lun_guid_size, is
12783 12750 	 * not dependent on the target, I don't believe the same
12784 12751 	 * truncation happens here UNLESS the standards change the
12785 12752 	 * FC_WWN_SIZE value to something larger than MAXNAMELEN
12786 12753 	 * (currently 255 bytes).
12787 12754 */
12788 12755
12789 12756 for (i = 0; i < FC_WWN_SIZE; i++) {
12790 12757 (void) sprintf(&buf[i << 1], "%02x",
12791 12758 ptgt->tgt_port_wwn.raw_wwn[i]);
12792 12759 }
12793 12760
12794 12761 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x",
12795 12762 buf, plun->lun_num);
12796 12763
12797 12764 if (pip == NULL || plun->lun_state & FCP_LUN_CHANGED) {
12798 12765 /*
12799 12766 * Release the locks before calling into
12800 12767 * mdi_pi_alloc_compatible() since this can result in a
12801 12768 * callback into fcp which can result in a deadlock
12802 12769 * (see bug # 4870272).
12803 12770 *
12804 12771 * Basically, what we are trying to avoid is the scenario where
12805 12772 * one thread does ndi_devi_enter() and tries to grab
12806 12773 * fcp_mutex and another does it the other way round.
12807 12774 *
12808 12775 * But before we do that, make sure that nobody releases the
12809 12776 * port in the meantime. We can do this by setting a flag.
12810 12777 */
12811 12778 plun->lun_state &= ~(FCP_LUN_CHANGED);
12812 12779 pptr->port_state |= FCP_STATE_IN_MDI;
12813 12780 mutex_exit(&plun->lun_mutex);
12814 12781 mutex_exit(&pptr->port_mutex);
12815 12782 if (mdi_pi_alloc_compatible(pdip, nname, plun->lun_guid,
12816 12783 uaddr, compatible, ncompatible, 0, &pip) != MDI_SUCCESS) {
12817 12784 fcp_log(CE_WARN, pptr->port_dip,
12818 12785 "!path alloc failed:0x%x", plun);
12819 12786 mutex_enter(&pptr->port_mutex);
12820 12787 mutex_enter(&plun->lun_mutex);
12821 12788 pptr->port_state &= ~FCP_STATE_IN_MDI;
12822 12789 failure++;
12823 12790 goto end_of_fcp_create_pip;
12824 12791 }
12825 12792 mutex_enter(&pptr->port_mutex);
12826 12793 mutex_enter(&plun->lun_mutex);
12827 12794 pptr->port_state &= ~FCP_STATE_IN_MDI;
12828 12795 } else {
12829 12796 (void) mdi_prop_remove(pip, NULL);
12830 12797 }
12831 12798
12832 12799 mdi_pi_set_phci_private(pip, (caddr_t)plun);
12833 12800
12834 12801 if (mdi_prop_update_byte_array(pip, NODE_WWN_PROP,
12835 12802 ptgt->tgt_node_wwn.raw_wwn, FC_WWN_SIZE)
12836 12803 != DDI_PROP_SUCCESS) {
12837 12804 failure++;
12838 12805 goto end_of_fcp_create_pip;
12839 12806 }
12840 12807
12841 12808 if (mdi_prop_update_byte_array(pip, PORT_WWN_PROP,
12842 12809 ptgt->tgt_port_wwn.raw_wwn, FC_WWN_SIZE)
12843 12810 != DDI_PROP_SUCCESS) {
12844 12811 failure++;
12845 12812 goto end_of_fcp_create_pip;
12846 12813 }
12847 12814
12848 12815 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, t_pwwn);
12849 12816 t_pwwn[16] = '\0';
12850 12817 if (mdi_prop_update_string(pip, TGT_PORT_PROP, t_pwwn)
12851 12818 != DDI_PROP_SUCCESS) {
12852 12819 failure++;
12853 12820 goto end_of_fcp_create_pip;
12854 12821 }
12855 12822
12856 12823 /*
12857 12824 * If there is no hard address - We might have to deal with
12858 12825 * that by using WWN - Having said that it is important to
12859 12826 * recognize this problem early so ssd can be informed of
12860 12827 * the right interconnect type.
12861 12828 */
12862 12829 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12863 12830 ptgt->tgt_hard_addr != 0) {
12864 12831 tgt_id = (uint32_t)
12865 12832 fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12866 12833 } else {
12867 12834 tgt_id = ptgt->tgt_d_id;
12868 12835 }
12869 12836
12870 12837 if (mdi_prop_update_int(pip, TARGET_PROP, tgt_id)
12871 12838 != DDI_PROP_SUCCESS) {
12872 12839 failure++;
12873 12840 goto end_of_fcp_create_pip;
12874 12841 }
12875 12842
12876 12843 if (mdi_prop_update_int(pip, LUN_PROP, (int)plun->lun_num)
12877 12844 != DDI_PROP_SUCCESS) {
12878 12845 failure++;
12879 12846 goto end_of_fcp_create_pip;
12880 12847 }
12881 12848 bcopy(&plun->lun_addr, &sam_lun, FCP_LUN_SIZE);
12882 12849 if (mdi_prop_update_int64(pip, SAM_LUN_PROP, sam_lun)
12883 12850 != DDI_PROP_SUCCESS) {
12884 12851 failure++;
12885 12852 goto end_of_fcp_create_pip;
12886 12853 }
12887 12854
12888 12855 end_of_fcp_create_pip:
12889 12856 scsi_hba_nodename_compatible_free(nname, compatible);
12890 12857
12891 12858 if (pip != NULL && failure) {
12892 12859 (void) mdi_prop_remove(pip, NULL);
12893 12860 mutex_exit(&plun->lun_mutex);
12894 12861 mutex_exit(&pptr->port_mutex);
12895 12862 (void) mdi_pi_free(pip, 0);
12896 12863 mutex_enter(&pptr->port_mutex);
12897 12864 mutex_enter(&plun->lun_mutex);
12898 12865 pip = NULL;
12899 12866 }
12900 12867
12901 12868 return (pip);
12902 12869 }
12903 12870
12904 12871 static dev_info_t *
12905 12872 fcp_find_existing_dip(struct fcp_lun *plun, dev_info_t *pdip, caddr_t name)
12906 12873 {
12907 12874 uint_t nbytes;
12908 12875 uchar_t *bytes;
12909 12876 uint_t nwords;
12910 12877 uint32_t tgt_id;
12911 12878 int *words;
12912 12879 dev_info_t *cdip;
12913 12880 dev_info_t *ndip;
12914 12881 struct fcp_tgt *ptgt = plun->lun_tgt;
12915 12882 struct fcp_port *pptr = ptgt->tgt_port;
12916 12883 int circular;
12917 12884
12918 12885 ndi_devi_enter(pdip, &circular);
12919 12886
12920 12887 ndip = (dev_info_t *)DEVI(pdip)->devi_child;
12921 12888 while ((cdip = ndip) != NULL) {
12922 12889 ndip = (dev_info_t *)DEVI(cdip)->devi_sibling;
12923 12890
12924 12891 if (strcmp(DEVI(cdip)->devi_node_name, name)) {
12925 12892 continue;
12926 12893 }
12927 12894
12928 12895 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12929 12896 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, NODE_WWN_PROP, &bytes,
12930 12897 &nbytes) != DDI_PROP_SUCCESS) {
12931 12898 continue;
12932 12899 }
12933 12900
12934 12901 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12935 12902 if (bytes != NULL) {
12936 12903 ddi_prop_free(bytes);
12937 12904 }
12938 12905 continue;
12939 12906 }
12940 12907 ASSERT(bytes != NULL);
12941 12908
12942 12909 if (bcmp(bytes, ptgt->tgt_node_wwn.raw_wwn, nbytes) != 0) {
12943 12910 ddi_prop_free(bytes);
12944 12911 continue;
12945 12912 }
12946 12913
12947 12914 ddi_prop_free(bytes);
12948 12915
12949 12916 if (ddi_prop_lookup_byte_array(DDI_DEV_T_ANY, cdip,
12950 12917 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, PORT_WWN_PROP, &bytes,
12951 12918 &nbytes) != DDI_PROP_SUCCESS) {
12952 12919 continue;
12953 12920 }
12954 12921
12955 12922 if (nbytes != FC_WWN_SIZE || bytes == NULL) {
12956 12923 if (bytes != NULL) {
12957 12924 ddi_prop_free(bytes);
12958 12925 }
12959 12926 continue;
12960 12927 }
12961 12928 ASSERT(bytes != NULL);
12962 12929
12963 12930 if (bcmp(bytes, ptgt->tgt_port_wwn.raw_wwn, nbytes) != 0) {
12964 12931 ddi_prop_free(bytes);
12965 12932 continue;
12966 12933 }
12967 12934
12968 12935 ddi_prop_free(bytes);
12969 12936
12970 12937 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
12971 12938 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, TARGET_PROP, &words,
12972 12939 &nwords) != DDI_PROP_SUCCESS) {
12973 12940 continue;
12974 12941 }
12975 12942
12976 12943 if (nwords != 1 || words == NULL) {
12977 12944 if (words != NULL) {
12978 12945 ddi_prop_free(words);
12979 12946 }
12980 12947 continue;
12981 12948 }
12982 12949 ASSERT(words != NULL);
12983 12950
12984 12951 /*
12985 12952 * If there is no hard address - We might have to deal with
12986 12953 * that by using WWN - Having said that it is important to
12987 12954 * recognize this problem early so ssd can be informed of
12988 12955 * the right interconnect type.
12989 12956 */
12990 12957 if (!FC_TOP_EXTERNAL(pptr->port_topology) &&
12991 12958 ptgt->tgt_hard_addr != 0) {
12992 12959 tgt_id =
12993 12960 (uint32_t)fcp_alpa_to_switch[ptgt->tgt_hard_addr];
12994 12961 } else {
12995 12962 tgt_id = ptgt->tgt_d_id;
12996 12963 }
12997 12964
12998 12965 if (tgt_id != (uint32_t)*words) {
12999 12966 ddi_prop_free(words);
13000 12967 continue;
13001 12968 }
13002 12969 ddi_prop_free(words);
13003 12970
13004 12971 if (ddi_prop_lookup_int_array(DDI_DEV_T_ANY, cdip,
13005 12972 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM, LUN_PROP, &words,
13006 12973 &nwords) != DDI_PROP_SUCCESS) {
13007 12974 continue;
13008 12975 }
13009 12976
13010 12977 if (nwords != 1 || words == NULL) {
13011 12978 if (words != NULL) {
13012 12979 ddi_prop_free(words);
13013 12980 }
13014 12981 continue;
13015 12982 }
13016 12983 ASSERT(words != NULL);
13017 12984
13018 12985 if (plun->lun_num == (uint16_t)*words) {
13019 12986 ddi_prop_free(words);
13020 12987 break;
13021 12988 }
13022 12989 ddi_prop_free(words);
13023 12990 }
13024 12991 ndi_devi_exit(pdip, circular);
13025 12992
13026 12993 return (cdip);
13027 12994 }
13028 12995
13029 12996
13030 12997 static int
13031 12998 fcp_is_pip_present(struct fcp_lun *plun, mdi_pathinfo_t *pip)
13032 12999 {
13033 13000 dev_info_t *pdip;
13034 13001 char buf[MAXNAMELEN];
13035 13002 char uaddr[MAXNAMELEN];
13036 13003 int rval = FC_FAILURE;
13037 13004
13038 13005 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13039 13006
13040 13007 pdip = plun->lun_tgt->tgt_port->port_dip;
13041 13008
13042 13009 /*
13043 13010 * Check if pip (and not plun->lun_cip) is NULL. plun->lun_cip can be
13044 13011 * non-NULL even when the LUN is not there as in the case when a LUN is
13045 13012 * configured and then deleted on the device end (for T3/T4 case). In
13046 13013 * such cases, pip will be NULL.
13047 13014 *
13048 13015 * If the device generates an RSCN, it will end up getting offlined when
13049 13016  * it disappears and a new LUN will get created when it is rediscovered
13050 13017 * on the device. If we check for lun_cip here, the LUN will not end
13051 13018 * up getting onlined since this function will end up returning a
13052 13019 * FC_SUCCESS.
13053 13020 *
13054 13021  * The behavior is different on other devices. For instance, on an HDS,
13055 13022 * there was no RSCN generated by the device but the next I/O generated
13056 13023 * a check condition and rediscovery got triggered that way. So, in
13057 13024 * such cases, this path will not be exercised
13058 13025 */
13059 13026 if (pip == NULL) {
13060 13027 FCP_TRACE(fcp_logq, LUN_PORT->port_instbuf,
13061 13028 fcp_trace, FCP_BUF_LEVEL_4, 0,
13062 13029 "fcp_is_pip_present: plun->lun_cip is NULL: "
13063 13030 "plun: %p lun state: %x num: %d target state: %x",
13064 13031 plun, plun->lun_state, plun->lun_num,
13065 13032 plun->lun_tgt->tgt_port->port_state);
13066 13033 return (rval);
13067 13034 }
13068 13035
13069 13036 fcp_wwn_to_ascii(plun->lun_tgt->tgt_port_wwn.raw_wwn, buf);
13070 13037
13071 13038 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13072 13039
13073 - if (plun->lun_old_guid) {
13074 - if (mdi_pi_find(pdip, plun->lun_old_guid, uaddr) == pip) {
13075 - rval = FC_SUCCESS;
13076 - }
13077 - } else {
13078 - if (mdi_pi_find(pdip, plun->lun_guid, uaddr) == pip) {
13079 - rval = FC_SUCCESS;
13080 - }
13040 + if (mdi_pi_find(pdip, NULL, uaddr) == pip) {
13041 + rval = FC_SUCCESS;
13081 13042 }
13043 +
13082 13044 return (rval);
13083 13045 }
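
fcp_is_pip_present() identifies a path by rebuilding the pHCI unit address of the form "w<port-wwn>,<lun>" and checking whether mdi_pi_find() returns the same pathinfo node; with this change the lookup passes a NULL GUID, so it no longer depends on whether the LUN's GUID has changed. The fragment below is a stand-alone, user-space sketch of how such a unit-address string is composed. It is illustrative only: wwn_to_ascii() is a hypothetical stand-in for fcp_wwn_to_ascii(), and the WWN and LUN values are made up.

    /*
     * User-space sketch (not driver code): compose an MPxIO-style unit
     * address "w<port-wwn>,<lun>" the way fcp_is_pip_present() does
     * before calling mdi_pi_find().
     */
    #include <stdio.h>
    #include <stdint.h>

    #define FC_WWN_SIZE 8
    #define MAXNAMELEN  256

    static void
    wwn_to_ascii(const uint8_t wwn[FC_WWN_SIZE], char *buf)
    {
        int i;

        for (i = 0; i < FC_WWN_SIZE; i++)
            (void) sprintf(buf + (i * 2), "%02x", wwn[i]);
    }

    int
    main(void)
    {
        uint8_t port_wwn[FC_WWN_SIZE] =
            { 0x21, 0x00, 0x00, 0x24, 0xff, 0x3e, 0xa2, 0x91 };
        uint16_t lun_num = 0x1a;
        char buf[MAXNAMELEN];
        char uaddr[MAXNAMELEN];

        wwn_to_ascii(port_wwn, buf);
        (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, lun_num);

        /* Prints: w21000024ff3ea291,1a */
        (void) printf("%s\n", uaddr);
        return (0);
    }
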
13084 13046
13085 13047 static mdi_pathinfo_t *
13086 13048 fcp_find_existing_pip(struct fcp_lun *plun, dev_info_t *pdip)
13087 13049 {
13088 13050 char buf[MAXNAMELEN];
13089 13051 char uaddr[MAXNAMELEN];
13090 13052 mdi_pathinfo_t *pip;
13091 13053 struct fcp_tgt *ptgt = plun->lun_tgt;
13092 13054 struct fcp_port *pptr = ptgt->tgt_port;
13093 13055
13094 13056 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13095 13057
13096 13058 fcp_wwn_to_ascii(ptgt->tgt_port_wwn.raw_wwn, buf);
13097 13059 (void) snprintf(uaddr, MAXNAMELEN, "w%s,%x", buf, plun->lun_num);
13098 13060
13099 13061 pip = mdi_pi_find(pdip, plun->lun_guid, uaddr);
13100 13062
13101 13063 return (pip);
13102 13064 }
13103 13065
13104 13066
13105 13067 static int
13106 13068 fcp_online_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13107 13069 int tcount, int flags, int *circ)
13108 13070 {
13109 13071 int rval;
13110 13072 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
13111 13073 struct fcp_tgt *ptgt = plun->lun_tgt;
13112 13074 dev_info_t *cdip = NULL;
13113 13075
13114 13076 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13115 13077 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13116 13078
13117 13079 if (plun->lun_cip == NULL) {
13118 13080 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13119 13081 fcp_trace, FCP_BUF_LEVEL_3, 0,
13120 13082 "fcp_online_child: plun->lun_cip is NULL: "
13121 13083 "plun: %p state: %x num: %d target state: %x",
13122 13084 plun, plun->lun_state, plun->lun_num,
13123 13085 plun->lun_tgt->tgt_port->port_state);
13124 13086 return (NDI_FAILURE);
13125 13087 }
13126 13088 again:
13127 13089 if (plun->lun_mpxio == 0) {
13128 13090 cdip = DIP(cip);
13129 13091 mutex_exit(&plun->lun_mutex);
13130 13092 mutex_exit(&pptr->port_mutex);
13131 13093
13132 13094 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13133 13095 fcp_trace, FCP_BUF_LEVEL_3, 0,
13134 13096 "!Invoking ndi_devi_online for %s: target=%x lun=%x",
13135 13097 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13136 13098
13137 13099 /*
13138 13100 * We could check for FCP_LUN_INIT here but chances
13139 13101 * of getting here when it's already in FCP_LUN_INIT
13140 13102	 * are rare and a duplicate ndi_devi_online wouldn't
13141 13103 * hurt either (as the node would already have been
13142 13104 * in CF2)
13143 13105 */
13144 13106 if (!i_ddi_devi_attached(ddi_get_parent(cdip))) {
13145 13107 rval = ndi_devi_bind_driver(cdip, flags);
13146 13108 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13147 13109 fcp_trace, FCP_BUF_LEVEL_3, 0,
13148 13110 "!Invoking ndi_devi_bind_driver: rval=%d", rval);
13149 13111 } else {
13150 13112 rval = ndi_devi_online(cdip, flags);
13151 13113 }
13152 13114
13153 13115 /*
13154 13116 * We log the message into trace buffer if the device
13155 13117 * is "ses" and into syslog for any other device
13156 13118 * type. This is to prevent the ndi_devi_online failure
13157 13119 * message that appears for V880/A5K ses devices.
13158 13120 */
13159 13121 if (rval == NDI_SUCCESS) {
13160 13122 mutex_enter(&ptgt->tgt_mutex);
13161 13123 plun->lun_state |= FCP_LUN_INIT;
13162 13124 mutex_exit(&ptgt->tgt_mutex);
13163 13125 } else if (strncmp(ddi_node_name(cdip), "ses", 3) != 0) {
13164 13126 fcp_log(CE_NOTE, pptr->port_dip,
13165 13127 "!ndi_devi_online:"
13166 13128 " failed for %s: target=%x lun=%x %x",
13167 13129 ddi_get_name(cdip), ptgt->tgt_d_id,
13168 13130 plun->lun_num, rval);
13169 13131 } else {
13170 13132 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13171 13133 fcp_trace, FCP_BUF_LEVEL_3, 0,
13172 13134 " !ndi_devi_online:"
13173 13135 " failed for %s: target=%x lun=%x %x",
13174 13136 ddi_get_name(cdip), ptgt->tgt_d_id,
13175 13137 plun->lun_num, rval);
13176 13138 }
13177 13139 } else {
13178 13140 cdip = mdi_pi_get_client(PIP(cip));
13179 13141 mutex_exit(&plun->lun_mutex);
13180 13142 mutex_exit(&pptr->port_mutex);
13181 13143
13182 13144 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13183 13145 fcp_trace, FCP_BUF_LEVEL_3, 0,
13184 13146 "!Invoking mdi_pi_online for %s: target=%x lun=%x",
13185 13147 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13186 13148
13187 13149 /*
13188 13150 * Hold path and exit phci to avoid deadlock with power
13189 13151 * management code during mdi_pi_online.
13190 13152 */
13191 13153 mdi_hold_path(PIP(cip));
13192 13154 mdi_devi_exit_phci(pptr->port_dip, *circ);
13193 13155
13194 13156 rval = mdi_pi_online(PIP(cip), flags);
13195 13157
13196 13158 mdi_devi_enter_phci(pptr->port_dip, circ);
13197 13159 mdi_rele_path(PIP(cip));
13198 13160
13199 13161 if (rval == MDI_SUCCESS) {
13200 13162 mutex_enter(&ptgt->tgt_mutex);
13201 13163 plun->lun_state |= FCP_LUN_INIT;
13202 13164 mutex_exit(&ptgt->tgt_mutex);
13203 13165
13204 13166 /*
13205 13167 * Clear MPxIO path permanent disable in case
13206 13168 * fcp hotplug dropped the offline event.
13207 13169 */
13208 13170 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13209 13171
13210 13172 } else if (rval == MDI_NOT_SUPPORTED) {
13211 13173 child_info_t *old_cip = cip;
13212 13174
13213 13175 /*
13214 13176 * MPxIO does not support this device yet.
13215 13177 * Enumerate in legacy mode.
13216 13178 */
13217 13179 mutex_enter(&pptr->port_mutex);
13218 13180 mutex_enter(&plun->lun_mutex);
13219 13181 plun->lun_mpxio = 0;
13220 13182 plun->lun_cip = NULL;
13221 13183 cdip = fcp_create_dip(plun, lcount, tcount);
13222 13184 plun->lun_cip = cip = CIP(cdip);
13223 13185 if (cip == NULL) {
13224 13186 fcp_log(CE_WARN, pptr->port_dip,
13225 13187 "!fcp_online_child: "
13226 13188 "Create devinfo failed for LU=%p", plun);
13227 13189 mutex_exit(&plun->lun_mutex);
13228 13190
13229 13191 mutex_enter(&ptgt->tgt_mutex);
13230 13192 plun->lun_state |= FCP_LUN_OFFLINE;
13231 13193 mutex_exit(&ptgt->tgt_mutex);
13232 13194
13233 13195 mutex_exit(&pptr->port_mutex);
13234 13196
13235 13197 /*
13236 13198 * free the mdi_pathinfo node
13237 13199 */
13238 13200 (void) mdi_pi_free(PIP(old_cip), 0);
13239 13201 } else {
13240 13202 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13241 13203 fcp_trace, FCP_BUF_LEVEL_3, 0,
13242 13204 "fcp_online_child: creating devinfo "
13243 13205 "node 0x%p for plun 0x%p",
13244 13206 cip, plun);
13245 13207 mutex_exit(&plun->lun_mutex);
13246 13208 mutex_exit(&pptr->port_mutex);
13247 13209 /*
13248 13210 * free the mdi_pathinfo node
13249 13211 */
13250 13212 (void) mdi_pi_free(PIP(old_cip), 0);
13251 13213 mutex_enter(&pptr->port_mutex);
13252 13214 mutex_enter(&plun->lun_mutex);
13253 13215 goto again;
13254 13216 }
13255 13217 } else {
13256 13218 if (cdip) {
13257 13219 fcp_log(CE_NOTE, pptr->port_dip,
13258 13220 "!fcp_online_child: mdi_pi_online:"
13259 13221 " failed for %s: target=%x lun=%x %x",
13260 13222 ddi_get_name(cdip), ptgt->tgt_d_id,
13261 13223 plun->lun_num, rval);
13262 13224 }
13263 13225 }
13264 13226 rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13265 13227 }
13266 13228
13267 13229 if (rval == NDI_SUCCESS) {
13268 13230 if (cdip) {
13269 13231 (void) ndi_event_retrieve_cookie(
13270 13232 pptr->port_ndi_event_hdl, cdip, FCAL_INSERT_EVENT,
13271 13233 &fcp_insert_eid, NDI_EVENT_NOPASS);
13272 13234 (void) ndi_event_run_callbacks(pptr->port_ndi_event_hdl,
13273 13235 cdip, fcp_insert_eid, NULL);
13274 13236 }
13275 13237 }
13276 13238 mutex_enter(&pptr->port_mutex);
13277 13239 mutex_enter(&plun->lun_mutex);
13278 13240 return (rval);
13279 13241 }
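
fcp_online_child() tries the MPxIO route first: it holds the path and exits the pHCI around mdi_pi_online() to avoid deadlocking with the power management code, and if MDI answers MDI_NOT_SUPPORTED it frees the pathinfo node, creates a plain devinfo node, and retries through the "again" label with ndi_devi_online(). The following is a minimal user-space sketch of that fallback-and-retry shape only; the enum values and helper names are stand-ins, not the NDI/MDI interfaces.

    /*
     * Simplified sketch of the fallback logic in fcp_online_child():
     * try the MPxIO path first, and if the framework reports the device
     * as not supported, drop to a legacy devinfo node and retry.
     */
    #include <stdio.h>

    enum result { SUCCESS, NOT_SUPPORTED, FAILURE };

    static enum result
    online_mpxio_path(void)
    {
        /* Stand-in for mdi_pi_online(); pretend MPxIO rejects the LUN. */
        return (NOT_SUPPORTED);
    }

    static enum result
    online_legacy_node(void)
    {
        /* Stand-in for ndi_devi_online() on a freshly created dip. */
        return (SUCCESS);
    }

    int
    main(void)
    {
        int use_mpxio = 1;
        enum result r;

    again:
        if (use_mpxio) {
            r = online_mpxio_path();
            if (r == NOT_SUPPORTED) {
                /* Free the pathinfo, create a child dip, retry. */
                use_mpxio = 0;
                goto again;
            }
        } else {
            r = online_legacy_node();
        }

        (void) printf("online %s\n", r == SUCCESS ? "ok" : "failed");
        return (0);
    }
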
13280 13242
13281 13243 /* ARGSUSED */
13282 13244 static int
13283 13245 fcp_offline_child(struct fcp_lun *plun, child_info_t *cip, int lcount,
13284 13246 int tcount, int flags, int *circ)
13285 13247 {
13286 13248 int rval;
13287 13249 int lun_mpxio;
13288 13250 struct fcp_port *pptr = plun->lun_tgt->tgt_port;
13289 13251 struct fcp_tgt *ptgt = plun->lun_tgt;
13290 13252 dev_info_t *cdip;
13291 13253
13292 13254 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13293 13255 ASSERT(MUTEX_HELD(&pptr->port_mutex));
13294 13256
13295 13257 if (plun->lun_cip == NULL) {
13296 13258 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13297 13259 fcp_trace, FCP_BUF_LEVEL_3, 0,
13298 13260 "fcp_offline_child: plun->lun_cip is NULL: "
13299 13261 "plun: %p lun state: %x num: %d target state: %x",
13300 13262 plun, plun->lun_state, plun->lun_num,
13301 13263 plun->lun_tgt->tgt_port->port_state);
13302 13264 return (NDI_FAILURE);
13303 13265 }
13304 13266
13305 13267 /*
13306 13268 * We will use this value twice. Make a copy to be sure we use
13307 13269 * the same value in both places.
13308 13270 */
13309 13271 lun_mpxio = plun->lun_mpxio;
13310 13272
13311 13273 if (lun_mpxio == 0) {
13312 13274 cdip = DIP(cip);
13313 13275 mutex_exit(&plun->lun_mutex);
13314 13276 mutex_exit(&pptr->port_mutex);
13315 - rval = ndi_devi_offline(DIP(cip), flags);
13277 + rval = ndi_devi_offline(DIP(cip), NDI_DEVFS_CLEAN | flags);
13316 13278 if (rval != NDI_SUCCESS) {
13317 13279 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13318 13280 fcp_trace, FCP_BUF_LEVEL_3, 0,
13319 13281 "fcp_offline_child: ndi_devi_offline failed "
13320 13282 "rval=%x cip=%p", rval, cip);
13321 13283 }
13322 13284 } else {
13323 13285 cdip = mdi_pi_get_client(PIP(cip));
13324 13286 mutex_exit(&plun->lun_mutex);
13325 13287 mutex_exit(&pptr->port_mutex);
13326 13288
13327 13289 /*
13328 13290 * Exit phci to avoid deadlock with power management code
13329 13291 * during mdi_pi_offline
13330 13292 */
13331 13293 mdi_hold_path(PIP(cip));
13332 13294 mdi_devi_exit_phci(pptr->port_dip, *circ);
13333 13295
13334 - rval = mdi_pi_offline(PIP(cip), flags);
13296 + rval = mdi_pi_offline(PIP(cip), flags & ~NDI_DEVI_REMOVE);
13335 13297
13336 13298 mdi_devi_enter_phci(pptr->port_dip, circ);
13337 13299 mdi_rele_path(PIP(cip));
13338 13300
13339 13301 rval = (rval == MDI_SUCCESS) ? NDI_SUCCESS : NDI_FAILURE;
13340 13302 }
13341 13303
13342 13304 mutex_enter(&ptgt->tgt_mutex);
13343 13305 plun->lun_state &= ~FCP_LUN_INIT;
13344 13306 mutex_exit(&ptgt->tgt_mutex);
13345 13307
13346 13308 if (rval == NDI_SUCCESS) {
13347 13309 cdip = NULL;
13348 13310 if (flags & NDI_DEVI_REMOVE) {
13349 13311 mutex_enter(&plun->lun_mutex);
13350 13312 /*
13351 13313 * If the guid of the LUN changes, lun_cip will not
13352 13314 * equal to cip, and after offlining the LUN with the
13353 13315 * old guid, we should keep lun_cip since it's the cip
13354 13316 * of the LUN with the new guid.
13355 13317 * Otherwise remove our reference to child node.
13356 13318 *
13357 13319 * This must be done before the child node is freed,
13358 13320 * otherwise other threads could see a stale lun_cip
13359 13321 * pointer.
13360 13322 */
13361 13323 if (plun->lun_cip == cip) {
13362 13324 plun->lun_cip = NULL;
13363 13325 }
13364 13326 if (plun->lun_old_guid) {
13365 13327 kmem_free(plun->lun_old_guid,
13366 13328 plun->lun_old_guid_size);
13367 13329 plun->lun_old_guid = NULL;
13368 13330 plun->lun_old_guid_size = 0;
13369 13331 }
13370 13332 mutex_exit(&plun->lun_mutex);
13371 13333 }
13372 13334 }
13373 13335
13374 13336 if (lun_mpxio != 0) {
13375 13337 if (rval == NDI_SUCCESS) {
13376 13338 /*
13377 13339 * Clear MPxIO path permanent disable as the path is
13378 13340 * already offlined.
13379 13341 */
13380 13342 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE);
13381 13343
13382 13344 if (flags & NDI_DEVI_REMOVE) {
13383 13345 (void) mdi_pi_free(PIP(cip), 0);
13384 13346 }
13385 13347 } else {
13386 13348 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13387 13349 fcp_trace, FCP_BUF_LEVEL_3, 0,
13388 13350 "fcp_offline_child: mdi_pi_offline failed "
13389 13351 "rval=%x cip=%p", rval, cip);
13390 13352 }
13391 13353 }
13392 13354
13393 13355 mutex_enter(&pptr->port_mutex);
13394 13356 mutex_enter(&plun->lun_mutex);
13395 13357
13396 13358 if (cdip) {
13397 13359 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13398 13360 fcp_trace, FCP_BUF_LEVEL_3, 0, "!%s failed for %s:"
13399 13361 " target=%x lun=%x", "ndi_offline",
13400 13362 ddi_get_name(cdip), ptgt->tgt_d_id, plun->lun_num);
13401 13363 }
13402 13364
13403 13365 return (rval);
13404 13366 }
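
The flag handling is the heart of this change to fcp_offline_child(): the non-MPxIO branch now ORs in NDI_DEVFS_CLEAN so the NDI framework performs the devfs cleanup itself (NEX-17944), and the MPxIO branch masks NDI_DEVI_REMOVE out of the flags before mdi_pi_offline() (NEX-18203), doing the actual removal later with mdi_pi_free() once the offline has succeeded. Below is a tiny user-space illustration of that masking, with made-up flag values rather than the real <sys/sunndi.h> definitions.

    /* Illustrative flag values, not the real <sys/sunndi.h> definitions. */
    #include <stdio.h>

    #define NDI_DEVI_REMOVE 0x01
    #define NDI_DEVFS_CLEAN 0x02

    int
    main(void)
    {
        unsigned int flags = NDI_DEVI_REMOVE;   /* caller asked for removal */

        /* Non-MPxIO child: let the NDI framework clean devfs. */
        unsigned int ndi_flags = NDI_DEVFS_CLEAN | flags;

        /* MPxIO child: never pass NDI_DEVI_REMOVE to the path offline. */
        unsigned int mdi_flags = flags & ~NDI_DEVI_REMOVE;

        (void) printf("ndi_devi_offline flags: 0x%x\n", ndi_flags);
        (void) printf("mdi_pi_offline flags:   0x%x\n", mdi_flags);

        if (flags & NDI_DEVI_REMOVE) {
            /* After a successful offline the node is freed separately. */
            (void) printf("mdi_pi_free() would run here\n");
        }
        return (0);
    }
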
13405 13367
13406 13368 static void
13407 13369 fcp_remove_child(struct fcp_lun *plun)
13408 13370 {
13409 13371 child_info_t *cip;
13410 13372 int circ;
13411 13373
13412 13374 ASSERT(MUTEX_HELD(&plun->lun_mutex));
13413 13375
13414 13376 if (fcp_is_child_present(plun, plun->lun_cip) == FC_SUCCESS) {
13415 13377 if (plun->lun_mpxio == 0) {
13416 13378 (void) ndi_prop_remove_all(DIP(plun->lun_cip));
13417 13379 (void) ndi_devi_free(DIP(plun->lun_cip));
13418 13380 plun->lun_cip = NULL;
13419 13381 } else {
13420 13382 /*
13421 13383 * Clear reference to the child node in the lun.
13422 13384 * This must be done before freeing it with mdi_pi_free
13423 13385 * and with lun_mutex held so that other threads always
13424 13386 * see either valid lun_cip or NULL when holding
13425 13387 * lun_mutex. We keep a copy in cip.
13426 13388 */
13427 13389 cip = plun->lun_cip;
13428 13390 plun->lun_cip = NULL;
13429 13391
13430 13392 mutex_exit(&plun->lun_mutex);
13431 13393 mutex_exit(&plun->lun_tgt->tgt_mutex);
13432 13394 mutex_exit(&plun->lun_tgt->tgt_port->port_mutex);
13433 13395
13434 - mdi_devi_enter(
13435 - plun->lun_tgt->tgt_port->port_dip, &circ);
13396 + mdi_devi_enter(plun->lun_tgt->tgt_port->port_dip,
13397 + &circ);
13436 13398
13437 13399 /*
13438 13400 * Exit phci to avoid deadlock with power management
13439 13401 * code during mdi_pi_offline
13440 13402 */
13441 13403 mdi_hold_path(PIP(cip));
13442 - mdi_devi_exit_phci(
13443 - plun->lun_tgt->tgt_port->port_dip, circ);
13444 - (void) mdi_pi_offline(PIP(cip),
13445 - NDI_DEVI_REMOVE);
13446 - mdi_devi_enter_phci(
13447 - plun->lun_tgt->tgt_port->port_dip, &circ);
13404 + mdi_devi_exit_phci(plun->lun_tgt->tgt_port->port_dip,
13405 + circ);
13406 + (void) mdi_pi_offline(PIP(cip), 0);
13407 + mdi_devi_enter_phci(plun->lun_tgt->tgt_port->port_dip,
13408 + &circ);
13448 13409 mdi_rele_path(PIP(cip));
13449 13410
13450 - mdi_devi_exit(
13451 - plun->lun_tgt->tgt_port->port_dip, circ);
13411 + mdi_devi_exit(plun->lun_tgt->tgt_port->port_dip, circ);
13452 13412
13453 13413 FCP_TRACE(fcp_logq,
13454 13414 plun->lun_tgt->tgt_port->port_instbuf,
13455 13415 fcp_trace, FCP_BUF_LEVEL_3, 0,
13456 13416 "lun=%p pip freed %p", plun, cip);
13457 13417
13458 13418 (void) mdi_prop_remove(PIP(cip), NULL);
13459 13419 (void) mdi_pi_free(PIP(cip), 0);
13460 13420
13461 13421 mutex_enter(&plun->lun_tgt->tgt_port->port_mutex);
13462 13422 mutex_enter(&plun->lun_tgt->tgt_mutex);
13463 13423 mutex_enter(&plun->lun_mutex);
13464 13424 }
13465 13425 } else {
13466 13426 plun->lun_cip = NULL;
13467 13427 }
13468 13428 }
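
fcp_remove_child() is careful to clear plun->lun_cip while lun_mutex is still held, and only then drop the locks and offline/free the pathinfo (now without NDI_DEVI_REMOVE), so other threads holding lun_mutex see either a valid pointer or NULL, never a freed one. A small pthreads sketch of that clear-under-the-lock-then-free ordering follows; the struct and helper are hypothetical, and free() stands in for mdi_pi_free().

    #include <pthread.h>
    #include <stdlib.h>

    struct lun {
        pthread_mutex_t mutex;
        void            *cip;   /* child/pathinfo reference */
    };

    static void
    remove_child(struct lun *l)
    {
        void *cip;

        (void) pthread_mutex_lock(&l->mutex);
        cip = l->cip;
        l->cip = NULL;          /* readers now see NULL, never a stale ptr */
        (void) pthread_mutex_unlock(&l->mutex);

        free(cip);              /* stands in for mdi_pi_free() */
    }

    int
    main(void)
    {
        struct lun l;

        (void) pthread_mutex_init(&l.mutex, NULL);
        l.cip = malloc(64);
        remove_child(&l);
        (void) pthread_mutex_destroy(&l.mutex);
        return (0);
    }
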
13469 13429
13470 13430 /*
13471 13431 * called when a timeout occurs
13472 13432 *
13473 13433 * can be scheduled during an attach or resume (if not already running)
13474 13434 *
13475 13435 * one timeout is set up for all ports
13476 13436 *
13477 13437 * acquires and releases the global mutex
13478 13438 */
13479 13439 /*ARGSUSED*/
13480 13440 static void
13481 13441 fcp_watch(void *arg)
13482 13442 {
13483 13443 struct fcp_port *pptr;
13484 13444 struct fcp_ipkt *icmd;
13485 13445 struct fcp_ipkt *nicmd;
13486 13446 struct fcp_pkt *cmd;
13487 13447 struct fcp_pkt *ncmd;
13488 13448 struct fcp_pkt *tail;
13489 13449 struct fcp_pkt *pcmd;
13490 13450 struct fcp_pkt *save_head;
13491 13451 struct fcp_port *save_port;
13492 13452
13493 13453 /* increment global watchdog time */
13494 13454 fcp_watchdog_time += fcp_watchdog_timeout;
13495 13455
13496 13456 mutex_enter(&fcp_global_mutex);
13497 13457
13498 13458 /* scan each port in our list */
13499 13459 for (pptr = fcp_port_head; pptr != NULL; pptr = pptr->port_next) {
13500 13460 save_port = fcp_port_head;
13501 13461 pptr->port_state |= FCP_STATE_IN_WATCHDOG;
13502 13462 mutex_exit(&fcp_global_mutex);
13503 13463
13504 13464 mutex_enter(&pptr->port_mutex);
13505 13465 if (pptr->port_ipkt_list == NULL &&
13506 13466 (pptr->port_state & (FCP_STATE_SUSPENDED |
13507 13467 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN))) {
13508 13468 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13509 13469 mutex_exit(&pptr->port_mutex);
13510 13470 mutex_enter(&fcp_global_mutex);
13511 13471 goto end_of_watchdog;
13512 13472 }
13513 13473
13514 13474 /*
13515 13475 * We check if a list of targets need to be offlined.
13516 13476 */
13517 13477 if (pptr->port_offline_tgts) {
13518 13478 fcp_scan_offline_tgts(pptr);
13519 13479 }
13520 13480
13521 13481 /*
13522 13482 * We check if a list of luns need to be offlined.
13523 13483 */
13524 13484 if (pptr->port_offline_luns) {
13525 13485 fcp_scan_offline_luns(pptr);
13526 13486 }
13527 13487
13528 13488 /*
13529 13489 * We check if a list of targets or luns need to be reset.
13530 13490 */
13531 13491 if (pptr->port_reset_list) {
13532 13492 fcp_check_reset_delay(pptr);
13533 13493 }
13534 13494
13535 13495 mutex_exit(&pptr->port_mutex);
13536 13496
13537 13497 /*
13538 13498 * This is where the pending commands (pkt) are checked for
13539 13499 * timeout.
13540 13500 */
13541 13501 mutex_enter(&pptr->port_pkt_mutex);
13542 13502 tail = pptr->port_pkt_tail;
13543 13503
13544 13504 for (pcmd = NULL, cmd = pptr->port_pkt_head;
13545 13505 cmd != NULL; cmd = ncmd) {
13546 13506 ncmd = cmd->cmd_next;
13547 13507 /*
13548 13508 * If a command is in this queue the bit CFLAG_IN_QUEUE
13549 13509 * must be set.
13550 13510 */
13551 13511 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
13552 13512 /*
13553 13513 * FCP_INVALID_TIMEOUT will be set for those
13554 13514	 * commands that need to be failed. Mostly those
13555 13515 * cmds that could not be queued down for the
13556 13516 * "timeout" value. cmd->cmd_timeout is used
13557 13517 * to try and requeue the command regularly.
13558 13518 */
13559 13519 if (cmd->cmd_timeout >= fcp_watchdog_time) {
13560 13520 /*
13561 13521 * This command hasn't timed out yet. Let's
13562 13522 * go to the next one.
13563 13523 */
13564 13524 pcmd = cmd;
13565 13525 goto end_of_loop;
13566 13526 }
13567 13527
13568 13528 if (cmd == pptr->port_pkt_head) {
13569 13529 ASSERT(pcmd == NULL);
13570 13530 pptr->port_pkt_head = cmd->cmd_next;
13571 13531 } else {
13572 13532 ASSERT(pcmd != NULL);
13573 13533 pcmd->cmd_next = cmd->cmd_next;
13574 13534 }
13575 13535
13576 13536 if (cmd == pptr->port_pkt_tail) {
13577 13537 ASSERT(cmd->cmd_next == NULL);
13578 13538 pptr->port_pkt_tail = pcmd;
13579 13539 if (pcmd) {
13580 13540 pcmd->cmd_next = NULL;
13581 13541 }
13582 13542 }
13583 13543 cmd->cmd_next = NULL;
13584 13544
13585 13545 /*
13586 13546 * save the current head before dropping the
13587 13547 * mutex - If the head doesn't remain the
13588 13548	 * same after re-acquiring the mutex, just
13589 13549 * bail out and revisit on next tick.
13590 13550 *
13591 13551 * PS: The tail pointer can change as the commands
13592 13552 * get requeued after failure to retransport
13593 13553 */
13594 13554 save_head = pptr->port_pkt_head;
13595 13555 mutex_exit(&pptr->port_pkt_mutex);
13596 13556
13597 13557 if (cmd->cmd_fp_pkt->pkt_timeout ==
13598 13558 FCP_INVALID_TIMEOUT) {
13599 13559 struct scsi_pkt *pkt = cmd->cmd_pkt;
13600 13560 struct fcp_lun *plun;
13601 13561 struct fcp_tgt *ptgt;
13602 13562
13603 13563 plun = ADDR2LUN(&pkt->pkt_address);
13604 13564 ptgt = plun->lun_tgt;
13605 13565
13606 13566 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13607 13567 fcp_trace, FCP_BUF_LEVEL_2, 0,
13608 13568 "SCSI cmd 0x%x to D_ID=%x timed out",
13609 13569 pkt->pkt_cdbp[0], ptgt->tgt_d_id);
13610 13570
13611 13571 cmd->cmd_state == FCP_PKT_ABORTING ?
13612 13572 fcp_fail_cmd(cmd, CMD_RESET,
13613 13573 STAT_DEV_RESET) : fcp_fail_cmd(cmd,
13614 13574 CMD_TIMEOUT, STAT_ABORTED);
13615 13575 } else {
13616 13576 fcp_retransport_cmd(pptr, cmd);
13617 13577 }
13618 13578 mutex_enter(&pptr->port_pkt_mutex);
13619 13579 if (save_head && save_head != pptr->port_pkt_head) {
13620 13580 /*
13621 13581	 * Looks like the linked list got changed (this mostly
13622 13582	 * happens when the OFFLINE LUN code starts
13623 13583	 * returning overflow queue commands in
13624 13584	 * parallel). So bail out and revisit during
13625 13585	 * the next tick.
13626 13586 */
13627 13587 break;
13628 13588 }
13629 13589 end_of_loop:
13630 13590 /*
13631 13591	 * Scan only up to the previously known tail pointer
13632 13592 * to avoid excessive processing - lots of new packets
13633 13593 * could have been added to the tail or the old ones
13634 13594 * re-queued.
13635 13595 */
13636 13596 if (cmd == tail) {
13637 13597 break;
13638 13598 }
13639 13599 }
13640 13600 mutex_exit(&pptr->port_pkt_mutex);
13641 13601
13642 13602 mutex_enter(&pptr->port_mutex);
13643 13603 for (icmd = pptr->port_ipkt_list; icmd != NULL; icmd = nicmd) {
13644 13604 struct fcp_tgt *ptgt = icmd->ipkt_tgt;
13645 13605
13646 13606 nicmd = icmd->ipkt_next;
13647 13607 if ((icmd->ipkt_restart != 0) &&
13648 13608 (icmd->ipkt_restart >= fcp_watchdog_time)) {
13649 13609 /* packet has not timed out */
13650 13610 continue;
13651 13611 }
13652 13612
13653 13613 /* time for packet re-transport */
13654 13614 if (icmd == pptr->port_ipkt_list) {
13655 13615 pptr->port_ipkt_list = icmd->ipkt_next;
13656 13616 if (pptr->port_ipkt_list) {
13657 13617 pptr->port_ipkt_list->ipkt_prev =
13658 13618 NULL;
13659 13619 }
13660 13620 } else {
13661 13621 icmd->ipkt_prev->ipkt_next = icmd->ipkt_next;
13662 13622 if (icmd->ipkt_next) {
13663 13623 icmd->ipkt_next->ipkt_prev =
13664 13624 icmd->ipkt_prev;
13665 13625 }
13666 13626 }
13667 13627 icmd->ipkt_next = NULL;
13668 13628 icmd->ipkt_prev = NULL;
13669 13629 mutex_exit(&pptr->port_mutex);
13670 13630
13671 13631 if (fcp_is_retryable(icmd)) {
13672 13632 fc_ulp_rscn_info_t *rscnp =
13673 13633 (fc_ulp_rscn_info_t *)icmd->ipkt_fpkt->
13674 13634 pkt_ulp_rscn_infop;
13675 13635
13676 13636 FCP_TRACE(fcp_logq, pptr->port_instbuf,
13677 13637 fcp_trace, FCP_BUF_LEVEL_2, 0,
13678 13638 "%x to D_ID=%x Retrying..",
13679 13639 icmd->ipkt_opcode,
13680 13640 icmd->ipkt_fpkt->pkt_cmd_fhdr.d_id);
13681 13641
13682 13642 /*
13683 13643 * Update the RSCN count in the packet
13684 13644 * before resending.
13685 13645 */
13686 13646
13687 13647 if (rscnp != NULL) {
13688 13648 rscnp->ulp_rscn_count =
13689 13649 fc_ulp_get_rscn_count(pptr->
13690 13650 port_fp_handle);
13691 13651 }
13692 13652
13693 13653 mutex_enter(&pptr->port_mutex);
13694 13654 mutex_enter(&ptgt->tgt_mutex);
13695 13655 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
13696 13656 mutex_exit(&ptgt->tgt_mutex);
13697 13657 mutex_exit(&pptr->port_mutex);
13698 13658 switch (icmd->ipkt_opcode) {
13699 13659 int rval;
13700 13660 case LA_ELS_PLOGI:
13701 13661 if ((rval = fc_ulp_login(
13702 13662 pptr->port_fp_handle,
13703 13663 &icmd->ipkt_fpkt, 1)) ==
13704 13664 FC_SUCCESS) {
13705 13665 mutex_enter(
13706 13666 &pptr->port_mutex);
13707 13667 continue;
13708 13668 }
13709 13669 if (fcp_handle_ipkt_errors(
13710 13670 pptr, ptgt, icmd, rval,
13711 13671 "PLOGI") == DDI_SUCCESS) {
13712 13672 mutex_enter(
13713 13673 &pptr->port_mutex);
13714 13674 continue;
13715 13675 }
13716 13676 break;
13717 13677
13718 13678 case LA_ELS_PRLI:
13719 13679 if ((rval = fc_ulp_issue_els(
13720 13680 pptr->port_fp_handle,
13721 13681 icmd->ipkt_fpkt)) ==
13722 13682 FC_SUCCESS) {
13723 13683 mutex_enter(
13724 13684 &pptr->port_mutex);
13725 13685 continue;
13726 13686 }
13727 13687 if (fcp_handle_ipkt_errors(
13728 13688 pptr, ptgt, icmd, rval,
13729 13689 "PRLI") == DDI_SUCCESS) {
13730 13690 mutex_enter(
13731 13691 &pptr->port_mutex);
13732 13692 continue;
13733 13693 }
13734 13694 break;
13735 13695
13736 13696 default:
13737 13697 if ((rval = fcp_transport(
13738 13698 pptr->port_fp_handle,
13739 13699 icmd->ipkt_fpkt, 1)) ==
13740 13700 FC_SUCCESS) {
13741 13701 mutex_enter(
13742 13702 &pptr->port_mutex);
13743 13703 continue;
13744 13704 }
13745 13705 if (fcp_handle_ipkt_errors(
13746 13706 pptr, ptgt, icmd, rval,
13747 13707 "PRLI") == DDI_SUCCESS) {
13748 13708 mutex_enter(
13749 13709 &pptr->port_mutex);
13750 13710 continue;
13751 13711 }
13752 13712 break;
13753 13713 }
13754 13714 } else {
13755 13715 mutex_exit(&ptgt->tgt_mutex);
13756 13716 mutex_exit(&pptr->port_mutex);
13757 13717 }
13758 13718 } else {
13759 13719 fcp_print_error(icmd->ipkt_fpkt);
13760 13720 }
13761 13721
13762 13722 (void) fcp_call_finish_init(pptr, ptgt,
13763 13723 icmd->ipkt_link_cnt, icmd->ipkt_change_cnt,
13764 13724 icmd->ipkt_cause);
13765 13725 fcp_icmd_free(pptr, icmd);
13766 13726 mutex_enter(&pptr->port_mutex);
13767 13727 }
13768 13728
13769 13729 pptr->port_state &= ~FCP_STATE_IN_WATCHDOG;
13770 13730 mutex_exit(&pptr->port_mutex);
13771 13731 mutex_enter(&fcp_global_mutex);
13772 13732
13773 13733 end_of_watchdog:
13774 13734 /*
13775 13735 * Bail out early before getting into trouble
13776 13736 */
13777 13737 if (save_port != fcp_port_head) {
13778 13738 break;
13779 13739 }
13780 13740 }
13781 13741
13782 13742 if (fcp_watchdog_init > 0) {
13783 13743 /* reschedule timeout to go again */
13784 13744 fcp_watchdog_id =
13785 13745 timeout(fcp_watch, NULL, fcp_watchdog_tick);
13786 13746 }
13787 13747 mutex_exit(&fcp_global_mutex);
13788 13748 }
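
fcp_watch() is a single self-rescheduling watchdog shared by every port: each invocation advances fcp_watchdog_time by fcp_watchdog_timeout, scans the port list for timed-out packets and internal packets to retry, and re-arms itself with timeout(9F) as long as fcp_watchdog_init is positive. Here is a minimal user-space analogue of that re-arming loop, with sleep() standing in for timeout(9F) and a trivial scan function standing in for the port walk.

    #include <stdio.h>
    #include <unistd.h>

    static unsigned int watchdog_time;              /* ticks since start */
    static const unsigned int watchdog_timeout = 1; /* seconds per tick */
    static int watchdog_enabled = 1;

    static void
    scan_ports(void)
    {
        /* Stands in for walking fcp_port_head and timing out packets. */
        (void) printf("tick %u: scanning ports\n", watchdog_time);
    }

    int
    main(void)
    {
        int ticks = 3;          /* run a few ticks, then stop */

        while (watchdog_enabled) {
            watchdog_time += watchdog_timeout;
            scan_ports();
            if (--ticks == 0)
                watchdog_enabled = 0;   /* fcp_watchdog_init dropped */
            else
                (void) sleep(watchdog_timeout);
        }
        return (0);
    }
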
13789 13749
13790 13750
13791 13751 static void
13792 13752 fcp_check_reset_delay(struct fcp_port *pptr)
13793 13753 {
13794 13754 uint32_t tgt_cnt;
13795 13755 int level;
13796 13756 struct fcp_tgt *ptgt;
13797 13757 struct fcp_lun *plun;
13798 13758 struct fcp_reset_elem *cur = NULL;
13799 13759 struct fcp_reset_elem *next = NULL;
13800 13760 struct fcp_reset_elem *prev = NULL;
13801 13761
13802 13762 ASSERT(mutex_owned(&pptr->port_mutex));
13803 13763
13804 13764 next = pptr->port_reset_list;
13805 13765 while ((cur = next) != NULL) {
13806 13766 next = cur->next;
13807 13767
13808 13768 if (cur->timeout < fcp_watchdog_time) {
13809 13769 prev = cur;
13810 13770 continue;
13811 13771 }
13812 13772
13813 13773 ptgt = cur->tgt;
13814 13774 plun = cur->lun;
13815 13775 tgt_cnt = cur->tgt_cnt;
13816 13776
13817 13777 if (ptgt) {
13818 13778 level = RESET_TARGET;
13819 13779 } else {
13820 13780 ASSERT(plun != NULL);
13821 13781 level = RESET_LUN;
13822 13782 ptgt = plun->lun_tgt;
13823 13783 }
13824 13784 if (prev) {
13825 13785 prev->next = next;
13826 13786 } else {
13827 13787 /*
13828 13788 * Because we drop port mutex while doing aborts for
13829 13789 * packets, we can't rely on reset_list pointing to
13830 13790 * our head
13831 13791 */
13832 13792 if (cur == pptr->port_reset_list) {
13833 13793 pptr->port_reset_list = next;
13834 13794 } else {
13835 13795 struct fcp_reset_elem *which;
13836 13796
13837 13797 which = pptr->port_reset_list;
13838 13798 while (which && which->next != cur) {
13839 13799 which = which->next;
13840 13800 }
13841 13801 ASSERT(which != NULL);
13842 13802
13843 13803 which->next = next;
13844 13804 prev = which;
13845 13805 }
13846 13806 }
13847 13807
13848 13808 kmem_free(cur, sizeof (*cur));
13849 13809
13850 13810 if (tgt_cnt == ptgt->tgt_change_cnt) {
13851 13811 mutex_enter(&ptgt->tgt_mutex);
13852 13812 if (level == RESET_TARGET) {
13853 13813 fcp_update_tgt_state(ptgt,
13854 13814 FCP_RESET, FCP_LUN_BUSY);
13855 13815 } else {
13856 13816 fcp_update_lun_state(plun,
13857 13817 FCP_RESET, FCP_LUN_BUSY);
13858 13818 }
13859 13819 mutex_exit(&ptgt->tgt_mutex);
13860 13820
13861 13821 mutex_exit(&pptr->port_mutex);
13862 13822 fcp_abort_all(pptr, ptgt, plun, tgt_cnt);
13863 13823 mutex_enter(&pptr->port_mutex);
13864 13824 }
13865 13825 }
13866 13826 }
13867 13827
13868 13828
13869 13829 static void
13870 13830 fcp_abort_all(struct fcp_port *pptr, struct fcp_tgt *ttgt,
13871 13831 struct fcp_lun *rlun, int tgt_cnt)
13872 13832 {
13873 13833 int rval;
13874 13834 struct fcp_lun *tlun, *nlun;
13875 13835 struct fcp_pkt *pcmd = NULL, *ncmd = NULL,
13876 13836 *cmd = NULL, *head = NULL,
13877 13837 *tail = NULL;
13878 13838
13879 13839 mutex_enter(&pptr->port_pkt_mutex);
13880 13840 for (cmd = pptr->port_pkt_head; cmd != NULL; cmd = ncmd) {
13881 13841 struct fcp_lun *plun = ADDR2LUN(&cmd->cmd_pkt->pkt_address);
13882 13842 struct fcp_tgt *ptgt = plun->lun_tgt;
13883 13843
13884 13844 ncmd = cmd->cmd_next;
13885 13845
13886 13846 if (ptgt != ttgt && plun != rlun) {
13887 13847 pcmd = cmd;
13888 13848 continue;
13889 13849 }
13890 13850
13891 13851 if (pcmd != NULL) {
13892 13852 ASSERT(pptr->port_pkt_head != cmd);
13893 13853 pcmd->cmd_next = ncmd;
13894 13854 } else {
13895 13855 ASSERT(cmd == pptr->port_pkt_head);
13896 13856 pptr->port_pkt_head = ncmd;
13897 13857 }
13898 13858 if (pptr->port_pkt_tail == cmd) {
13899 13859 ASSERT(cmd->cmd_next == NULL);
13900 13860 pptr->port_pkt_tail = pcmd;
13901 13861 if (pcmd != NULL) {
13902 13862 pcmd->cmd_next = NULL;
13903 13863 }
13904 13864 }
13905 13865
13906 13866 if (head == NULL) {
13907 13867 head = tail = cmd;
13908 13868 } else {
13909 13869 ASSERT(tail != NULL);
13910 13870 tail->cmd_next = cmd;
13911 13871 tail = cmd;
13912 13872 }
13913 13873 cmd->cmd_next = NULL;
13914 13874 }
13915 13875 mutex_exit(&pptr->port_pkt_mutex);
13916 13876
13917 13877 for (cmd = head; cmd != NULL; cmd = ncmd) {
13918 13878 struct scsi_pkt *pkt = cmd->cmd_pkt;
13919 13879
13920 13880 ncmd = cmd->cmd_next;
13921 13881 ASSERT(pkt != NULL);
13922 13882
13923 13883 mutex_enter(&pptr->port_mutex);
13924 13884 if (ttgt->tgt_change_cnt == tgt_cnt) {
13925 13885 mutex_exit(&pptr->port_mutex);
13926 13886 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
13927 13887 pkt->pkt_reason = CMD_RESET;
13928 13888 pkt->pkt_statistics |= STAT_DEV_RESET;
13929 13889 cmd->cmd_state = FCP_PKT_IDLE;
13930 13890 fcp_post_callback(cmd);
13931 13891 } else {
13932 13892 mutex_exit(&pptr->port_mutex);
13933 13893 }
13934 13894 }
13935 13895
13936 13896 /*
13937 13897 * If the FCA will return all the commands in its queue then our
13938 13898 * work is easy, just return.
13939 13899 */
13940 13900
13941 13901 if (pptr->port_reset_action == FC_RESET_RETURN_ALL) {
13942 13902 return;
13943 13903 }
13944 13904
13945 13905 /*
13946 13906 * For RESET_LUN get hold of target pointer
13947 13907 */
13948 13908 if (ttgt == NULL) {
13949 13909 ASSERT(rlun != NULL);
13950 13910
13951 13911 ttgt = rlun->lun_tgt;
13952 13912
13953 13913 ASSERT(ttgt != NULL);
13954 13914 }
13955 13915
13956 13916 /*
13957 13917 * There are some severe race conditions here.
13958 13918 * While we are trying to abort the pkt, it might be completing
13959 13919 * so mark it aborted and if the abort does not succeed then
13960 13920 * handle it in the watch thread.
13961 13921 */
13962 13922 mutex_enter(&ttgt->tgt_mutex);
13963 13923 nlun = ttgt->tgt_lun;
13964 13924 mutex_exit(&ttgt->tgt_mutex);
13965 13925 while ((tlun = nlun) != NULL) {
13966 13926 int restart = 0;
13967 13927 if (rlun && rlun != tlun) {
13968 13928 mutex_enter(&ttgt->tgt_mutex);
13969 13929 nlun = tlun->lun_next;
13970 13930 mutex_exit(&ttgt->tgt_mutex);
13971 13931 continue;
13972 13932 }
13973 13933 mutex_enter(&tlun->lun_mutex);
13974 13934 cmd = tlun->lun_pkt_head;
13975 13935 while (cmd != NULL) {
13976 13936 if (cmd->cmd_state == FCP_PKT_ISSUED) {
13977 13937 struct scsi_pkt *pkt;
13978 13938
13979 13939 restart = 1;
13980 13940 cmd->cmd_state = FCP_PKT_ABORTING;
13981 13941 mutex_exit(&tlun->lun_mutex);
13982 13942 rval = fc_ulp_abort(pptr->port_fp_handle,
13983 13943 cmd->cmd_fp_pkt, KM_SLEEP);
13984 13944 if (rval == FC_SUCCESS) {
13985 13945 pkt = cmd->cmd_pkt;
13986 13946 pkt->pkt_reason = CMD_RESET;
13987 13947 pkt->pkt_statistics |= STAT_DEV_RESET;
13988 13948 cmd->cmd_state = FCP_PKT_IDLE;
13989 13949 fcp_post_callback(cmd);
13990 13950 } else {
13991 13951 caddr_t msg;
13992 13952
13993 13953 (void) fc_ulp_error(rval, &msg);
13994 13954
13995 13955 /*
13996 13956 * This part is tricky. The abort
13997 13957 * failed and now the command could
13998 13958 * be completing. The cmd_state ==
13999 13959 * FCP_PKT_ABORTING should save
14000 13960 * us in fcp_cmd_callback. If we
14001 13961 * are already aborting ignore the
14002 13962 * command in fcp_cmd_callback.
14003 13963 * Here we leave this packet for 20
14004 13964 * sec to be aborted in the
14005 13965 * fcp_watch thread.
14006 13966 */
14007 13967 fcp_log(CE_WARN, pptr->port_dip,
14008 13968 "!Abort failed after reset %s",
14009 13969 msg);
14010 13970
14011 13971 cmd->cmd_timeout =
14012 13972 fcp_watchdog_time +
14013 13973 cmd->cmd_pkt->pkt_time +
14014 13974 FCP_FAILED_DELAY;
14015 13975
14016 13976 cmd->cmd_fp_pkt->pkt_timeout =
14017 13977 FCP_INVALID_TIMEOUT;
14018 13978 /*
14019 13979 * This is a hack, cmd is put in the
14020 13980 * overflow queue so that it can be
14021 13981 * timed out finally
14022 13982 */
14023 13983 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14024 13984
14025 13985 mutex_enter(&pptr->port_pkt_mutex);
14026 13986 if (pptr->port_pkt_head) {
14027 13987 ASSERT(pptr->port_pkt_tail
14028 13988 != NULL);
14029 13989 pptr->port_pkt_tail->cmd_next
14030 13990 = cmd;
14031 13991 pptr->port_pkt_tail = cmd;
14032 13992 } else {
14033 13993 ASSERT(pptr->port_pkt_tail
14034 13994 == NULL);
14035 13995 pptr->port_pkt_head =
14036 13996 pptr->port_pkt_tail
14037 13997 = cmd;
14038 13998 }
14039 13999 cmd->cmd_next = NULL;
14040 14000 mutex_exit(&pptr->port_pkt_mutex);
14041 14001 }
14042 14002 mutex_enter(&tlun->lun_mutex);
14043 14003 cmd = tlun->lun_pkt_head;
14044 14004 } else {
14045 14005 cmd = cmd->cmd_forw;
14046 14006 }
14047 14007 }
14048 14008 mutex_exit(&tlun->lun_mutex);
14049 14009
14050 14010 mutex_enter(&ttgt->tgt_mutex);
14051 14011 restart == 1 ? (nlun = ttgt->tgt_lun) : (nlun = tlun->lun_next);
14052 14012 mutex_exit(&ttgt->tgt_mutex);
14053 14013
14054 14014 mutex_enter(&pptr->port_mutex);
14055 14015 if (tgt_cnt != ttgt->tgt_change_cnt) {
14056 14016 mutex_exit(&pptr->port_mutex);
14057 14017 return;
14058 14018 } else {
14059 14019 mutex_exit(&pptr->port_mutex);
14060 14020 }
14061 14021 }
14062 14022 }
14063 14023
14064 14024
14065 14025 /*
14066 14026 * unlink the soft state, returning the soft state found (if any)
14067 14027 *
14068 14028 * acquires and releases the global mutex
14069 14029 */
14070 14030 struct fcp_port *
14071 14031 fcp_soft_state_unlink(struct fcp_port *pptr)
14072 14032 {
14073 14033 struct fcp_port *hptr; /* ptr index */
14074 14034 struct fcp_port *tptr; /* prev hptr */
14075 14035
14076 14036 mutex_enter(&fcp_global_mutex);
14077 14037 for (hptr = fcp_port_head, tptr = NULL;
14078 14038 hptr != NULL;
14079 14039 tptr = hptr, hptr = hptr->port_next) {
14080 14040 if (hptr == pptr) {
14081 14041 /* we found a match -- remove this item */
14082 14042 if (tptr == NULL) {
14083 14043 /* we're at the head of the list */
14084 14044 fcp_port_head = hptr->port_next;
14085 14045 } else {
14086 14046 tptr->port_next = hptr->port_next;
14087 14047 }
14088 14048 break; /* success */
14089 14049 }
14090 14050 }
14091 14051 if (fcp_port_head == NULL) {
14092 14052 fcp_cleanup_blacklist(&fcp_lun_blacklist);
14093 14053 }
14094 14054 mutex_exit(&fcp_global_mutex);
14095 14055 return (hptr);
14096 14056 }
14097 14057
14098 14058
14099 14059 /*
14100 14060 * called by fcp_scsi_hba_tgt_init to find a LUN given a
14101 14061 * WWN and a LUN number
14102 14062 */
14103 14063 /* ARGSUSED */
14104 14064 static struct fcp_lun *
14105 14065 fcp_lookup_lun(struct fcp_port *pptr, uchar_t *wwn, uint16_t lun)
14106 14066 {
14107 14067 int hash;
14108 14068 struct fcp_tgt *ptgt;
14109 14069 struct fcp_lun *plun;
14110 14070
14111 14071 ASSERT(mutex_owned(&pptr->port_mutex));
14112 14072
14113 14073 hash = FCP_HASH(wwn);
14114 14074 for (ptgt = pptr->port_tgt_hash_table[hash]; ptgt != NULL;
14115 14075 ptgt = ptgt->tgt_next) {
14116 14076 if (bcmp((caddr_t)wwn, (caddr_t)&ptgt->tgt_port_wwn.raw_wwn[0],
14117 14077 sizeof (ptgt->tgt_port_wwn)) == 0) {
14118 14078 mutex_enter(&ptgt->tgt_mutex);
14119 14079 for (plun = ptgt->tgt_lun;
14120 14080 plun != NULL;
14121 14081 plun = plun->lun_next) {
14122 14082 if (plun->lun_num == lun) {
14123 14083 mutex_exit(&ptgt->tgt_mutex);
14124 14084 return (plun);
14125 14085 }
14126 14086 }
14127 14087 mutex_exit(&ptgt->tgt_mutex);
14128 14088 return (NULL);
14129 14089 }
14130 14090 }
14131 14091 return (NULL);
14132 14092 }
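
fcp_lookup_lun() resolves a (WWN, LUN) pair by hashing the 8-byte port WWN into a bucket, bcmp()ing the WWN of each target in that bucket, and then walking the matching target's LUN list. The compact user-space sketch below reproduces that two-level lookup; the hash macro here is only a guess at the flavour of hash, not the driver's FCP_HASH, and the structures are simplified.

    #include <stdint.h>
    #include <string.h>
    #include <stdio.h>

    #define FC_WWN_SIZE 8
    #define NUM_HASH    32
    #define WWN_HASH(w) (((w)[2] ^ (w)[7]) % NUM_HASH)  /* illustrative */

    struct lun {
        uint16_t    lun_num;
        struct lun  *lun_next;
    };

    struct tgt {
        uint8_t     port_wwn[FC_WWN_SIZE];
        struct lun  *tgt_lun;
        struct tgt  *tgt_next;
    };

    static struct tgt *hash_table[NUM_HASH];

    static struct lun *
    lookup_lun(const uint8_t *wwn, uint16_t lun)
    {
        struct tgt *t;
        struct lun *l;

        for (t = hash_table[WWN_HASH(wwn)]; t != NULL; t = t->tgt_next) {
            if (memcmp(wwn, t->port_wwn, FC_WWN_SIZE) != 0)
                continue;
            for (l = t->tgt_lun; l != NULL; l = l->lun_next) {
                if (l->lun_num == lun)
                    return (l);
            }
            return (NULL);      /* target found, LUN absent */
        }
        return (NULL);
    }

    int
    main(void)
    {
        static struct lun l0 = { 0, NULL };
        static struct tgt t0 = {
            { 0x21, 0, 0, 0x24, 0xff, 0x3e, 0xa2, 0x91 }, &l0, NULL
        };

        hash_table[WWN_HASH(t0.port_wwn)] = &t0;
        (void) printf("lun %sfound\n",
            lookup_lun(t0.port_wwn, 0) != NULL ? "" : "not ");
        return (0);
    }
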
14133 14093
14134 14094 /*
14135 14095 * Function: fcp_prepare_pkt
14136 14096 *
14137 14097 * Description: This function prepares the SCSI cmd pkt, passed by the caller,
14138 14098 * for fcp_start(). It binds the data or partially maps it.
14139 14099 * Builds the FCP header and starts the initialization of the
14140 14100 * Fibre Channel header.
14141 14101 *
14142 14102 * Argument: *pptr FCP port.
14143 14103 * *cmd FCP packet.
14144 14104 * *plun LUN the command will be sent to.
14145 14105 *
14146 14106 * Context: User, Kernel and Interrupt context.
14147 14107 */
14148 14108 static void
14149 14109 fcp_prepare_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd,
14150 14110 struct fcp_lun *plun)
14151 14111 {
14152 14112 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
14153 14113 struct fcp_tgt *ptgt = plun->lun_tgt;
14154 14114 struct fcp_cmd *fcmd = &cmd->cmd_fcp_cmd;
14155 14115
14156 14116 ASSERT(cmd->cmd_pkt->pkt_comp ||
14157 14117 (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR));
14158 14118
14159 14119 if (cmd->cmd_pkt->pkt_numcookies) {
14160 14120 if (cmd->cmd_pkt->pkt_dma_flags & DDI_DMA_READ) {
14161 14121 fcmd->fcp_cntl.cntl_read_data = 1;
14162 14122 fcmd->fcp_cntl.cntl_write_data = 0;
14163 14123 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
14164 14124 } else {
14165 14125 fcmd->fcp_cntl.cntl_read_data = 0;
14166 14126 fcmd->fcp_cntl.cntl_write_data = 1;
14167 14127 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
14168 14128 }
14169 14129
14170 14130 fpkt->pkt_data_cookie = cmd->cmd_pkt->pkt_cookies;
14171 14131
14172 14132 fpkt->pkt_data_cookie_cnt = cmd->cmd_pkt->pkt_numcookies;
14173 14133 ASSERT(fpkt->pkt_data_cookie_cnt <=
14174 14134 pptr->port_data_dma_attr.dma_attr_sgllen);
14175 14135
14176 14136 cmd->cmd_dmacount = cmd->cmd_pkt->pkt_dma_len;
14177 14137
14178 14138 /* FCA needs pkt_datalen to be set */
14179 14139 fpkt->pkt_datalen = cmd->cmd_dmacount;
14180 14140 fcmd->fcp_data_len = cmd->cmd_dmacount;
14181 14141 } else {
14182 14142 fcmd->fcp_cntl.cntl_read_data = 0;
14183 14143 fcmd->fcp_cntl.cntl_write_data = 0;
14184 14144 fpkt->pkt_tran_type = FC_PKT_EXCHANGE;
14185 14145 fpkt->pkt_datalen = 0;
14186 14146 fcmd->fcp_data_len = 0;
14187 14147 }
14188 14148
14189 14149 /* set up the Tagged Queuing type */
14190 14150 if (cmd->cmd_pkt->pkt_flags & FLAG_HTAG) {
14191 14151 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
14192 14152 } else if (cmd->cmd_pkt->pkt_flags & FLAG_OTAG) {
14193 14153 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
14194 14154 } else if (cmd->cmd_pkt->pkt_flags & FLAG_STAG) {
14195 14155 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
14196 14156 } else {
14197 14157 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
14198 14158 }
14199 14159
14200 14160 fcmd->fcp_ent_addr = plun->lun_addr;
14201 14161
14202 14162 if (pptr->port_fcp_dma != FC_NO_DVMA_SPACE) {
14203 14163 FCP_CP_OUT((uint8_t *)fcmd, fpkt->pkt_cmd,
14204 14164 fpkt->pkt_cmd_acc, sizeof (struct fcp_cmd));
14205 14165 } else {
14206 14166 ASSERT(fpkt->pkt_cmd_dma == NULL && fpkt->pkt_resp_dma == NULL);
14207 14167 }
14208 14168
14209 14169 cmd->cmd_pkt->pkt_reason = CMD_CMPLT;
14210 14170 cmd->cmd_pkt->pkt_state = 0;
14211 14171 cmd->cmd_pkt->pkt_statistics = 0;
14212 14172 cmd->cmd_pkt->pkt_resid = 0;
14213 14173
14214 14174 cmd->cmd_fp_pkt->pkt_data_dma = cmd->cmd_pkt->pkt_handle;
14215 14175
14216 14176 if (cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) {
14217 14177 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
14218 14178 fpkt->pkt_comp = NULL;
14219 14179 } else {
14220 14180 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
14221 14181 if (cmd->cmd_pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
14222 14182 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
14223 14183 }
14224 14184 fpkt->pkt_comp = fcp_cmd_callback;
14225 14185 }
14226 14186
14227 14187 mutex_enter(&pptr->port_mutex);
14228 14188 if (pptr->port_state & FCP_STATE_SUSPENDED) {
14229 14189 fpkt->pkt_tran_flags |= FC_TRAN_DUMPING;
14230 14190 }
14231 14191 mutex_exit(&pptr->port_mutex);
14232 14192
14233 14193 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
14234 14194 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
14235 14195
14236 14196 /*
14237 14197 * Save a few kernel cycles here
14238 14198 */
14239 14199 #ifndef __lock_lint
14240 14200 fpkt->pkt_fca_device = ptgt->tgt_fca_dev;
14241 14201 #endif /* __lock_lint */
14242 14202 }
14243 14203
14244 14204 static void
14245 14205 fcp_post_callback(struct fcp_pkt *cmd)
14246 14206 {
14247 14207 scsi_hba_pkt_comp(cmd->cmd_pkt);
14248 14208 }
14249 14209
14250 14210
14251 14211 /*
14252 14212 * called to do polled I/O by fcp_start()
14253 14213 *
14254 14214	 * return a transport status value, i.e. TRAN_ACCEPT for success
14255 14215 */
14256 14216 static int
14257 14217 fcp_dopoll(struct fcp_port *pptr, struct fcp_pkt *cmd)
14258 14218 {
14259 14219 int rval;
14260 14220
14261 14221 #ifdef DEBUG
14262 14222 mutex_enter(&pptr->port_pkt_mutex);
14263 14223 pptr->port_npkts++;
14264 14224 mutex_exit(&pptr->port_pkt_mutex);
14265 14225 #endif /* DEBUG */
14266 14226
14267 14227 if (cmd->cmd_fp_pkt->pkt_timeout) {
14268 14228 cmd->cmd_fp_pkt->pkt_timeout = cmd->cmd_pkt->pkt_time;
14269 14229 } else {
14270 14230 cmd->cmd_fp_pkt->pkt_timeout = FCP_POLL_TIMEOUT;
14271 14231 }
14272 14232
14273 14233 ASSERT(cmd->cmd_fp_pkt->pkt_comp == NULL);
14274 14234
14275 14235 cmd->cmd_state = FCP_PKT_ISSUED;
14276 14236
14277 14237 rval = fc_ulp_transport(pptr->port_fp_handle, cmd->cmd_fp_pkt);
14278 14238
14279 14239 #ifdef DEBUG
14280 14240 mutex_enter(&pptr->port_pkt_mutex);
14281 14241 pptr->port_npkts--;
14282 14242 mutex_exit(&pptr->port_pkt_mutex);
14283 14243 #endif /* DEBUG */
14284 14244
14285 14245 cmd->cmd_state = FCP_PKT_IDLE;
14286 14246
14287 14247 switch (rval) {
14288 14248 case FC_SUCCESS:
14289 14249 if (cmd->cmd_fp_pkt->pkt_state == FC_PKT_SUCCESS) {
14290 14250 fcp_complete_pkt(cmd->cmd_fp_pkt);
14291 14251 rval = TRAN_ACCEPT;
14292 14252 } else {
14293 14253 rval = TRAN_FATAL_ERROR;
14294 14254 }
14295 14255 break;
14296 14256
14297 14257 case FC_TRAN_BUSY:
14298 14258 rval = TRAN_BUSY;
14299 14259 cmd->cmd_pkt->pkt_resid = 0;
14300 14260 break;
14301 14261
14302 14262 case FC_BADPACKET:
14303 14263 rval = TRAN_BADPKT;
14304 14264 break;
14305 14265
14306 14266 default:
14307 14267 rval = TRAN_FATAL_ERROR;
14308 14268 break;
14309 14269 }
14310 14270
14311 14271 return (rval);
14312 14272 }
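
fcp_dopoll() finishes by folding the Fibre Channel transport result, together with the completed packet's own state, into the SCSA codes the target driver expects (TRAN_ACCEPT, TRAN_BUSY, TRAN_BADPKT, or TRAN_FATAL_ERROR). A sketch of that translation follows, using stand-in enum values rather than the real FC_* and TRAN_* constants.

    #include <stdio.h>

    enum fc_status { FC_OK, FC_TRAN_BUSY, FC_BADPACKET, FC_OTHER };
    enum tran_status { TRAN_ACCEPT, TRAN_BUSY, TRAN_BADPKT, TRAN_FATAL_ERROR };

    static enum tran_status
    fc_to_tran(enum fc_status fc, int pkt_ok)
    {
        switch (fc) {
        case FC_OK:
            /* Success only if the completed packet itself succeeded. */
            return (pkt_ok ? TRAN_ACCEPT : TRAN_FATAL_ERROR);
        case FC_TRAN_BUSY:
            return (TRAN_BUSY);
        case FC_BADPACKET:
            return (TRAN_BADPKT);
        default:
            return (TRAN_FATAL_ERROR);
        }
    }

    int
    main(void)
    {
        (void) printf("%d %d\n", fc_to_tran(FC_OK, 1),
            fc_to_tran(FC_TRAN_BUSY, 0));
        return (0);
    }
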
14313 14273
14314 14274
14315 14275 /*
14316 14276 * called by some of the following transport-called routines to convert
14317 14277 * a supplied dip ptr to a port struct ptr (i.e. to the soft state)
14318 14278 */
14319 14279 static struct fcp_port *
14320 14280 fcp_dip2port(dev_info_t *dip)
14321 14281 {
14322 14282 int instance;
14323 14283
14324 14284 instance = ddi_get_instance(dip);
14325 14285 return (ddi_get_soft_state(fcp_softstate, instance));
14326 14286 }
14327 14287
14328 14288
14329 14289 /*
14330 14290 * called internally to return a LUN given a dip
14331 14291 */
14332 14292 struct fcp_lun *
14333 14293 fcp_get_lun_from_cip(struct fcp_port *pptr, child_info_t *cip)
14334 14294 {
14335 14295 struct fcp_tgt *ptgt;
14336 14296 struct fcp_lun *plun;
14337 14297 int i;
14338 14298
14339 14299
14340 14300 ASSERT(mutex_owned(&pptr->port_mutex));
14341 14301
14342 14302 for (i = 0; i < FCP_NUM_HASH; i++) {
14343 14303 for (ptgt = pptr->port_tgt_hash_table[i];
14344 14304 ptgt != NULL;
14345 14305 ptgt = ptgt->tgt_next) {
14346 14306 mutex_enter(&ptgt->tgt_mutex);
14347 14307 for (plun = ptgt->tgt_lun; plun != NULL;
14348 14308 plun = plun->lun_next) {
14349 14309 mutex_enter(&plun->lun_mutex);
14350 14310 if (plun->lun_cip == cip) {
14351 14311 mutex_exit(&plun->lun_mutex);
14352 14312 mutex_exit(&ptgt->tgt_mutex);
14353 14313 return (plun); /* match found */
14354 14314 }
14355 14315 mutex_exit(&plun->lun_mutex);
14356 14316 }
14357 14317 mutex_exit(&ptgt->tgt_mutex);
14358 14318 }
14359 14319 }
14360 14320 return (NULL); /* no LUN found */
14361 14321 }
14362 14322
14363 14323 /*
14364 14324 * pass an element to the hotplug list, kick the hotplug thread
14365 14325 * and wait for the element to get processed by the hotplug thread.
14366 14326 * on return the element is freed.
14367 14327 *
14368 14328 * return zero success and non-zero on failure
14369 14329 *
14370 14330 * acquires/releases the target mutex
14371 14331 *
14372 14332 */
14373 14333 static int
14374 14334 fcp_pass_to_hp_and_wait(struct fcp_port *pptr, struct fcp_lun *plun,
14375 14335 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags)
14376 14336 {
14377 14337 struct fcp_hp_elem *elem;
14378 14338 int rval;
14379 14339
14380 14340 mutex_enter(&plun->lun_tgt->tgt_mutex);
14381 14341 if ((elem = fcp_pass_to_hp(pptr, plun, cip,
14382 14342 what, link_cnt, tgt_cnt, flags, 1)) == NULL) {
14383 14343 mutex_exit(&plun->lun_tgt->tgt_mutex);
14384 14344 fcp_log(CE_CONT, pptr->port_dip,
14385 14345 "Can not pass_to_hp: what: %d; D_ID=%x, LUN=%x\n",
14386 14346 what, plun->lun_tgt->tgt_d_id, plun->lun_num);
14387 14347 return (NDI_FAILURE);
14388 14348 }
14389 14349 mutex_exit(&plun->lun_tgt->tgt_mutex);
14390 14350 mutex_enter(&elem->mutex);
14391 14351 if (elem->wait) {
14392 14352 while (elem->wait) {
14393 14353 cv_wait(&elem->cv, &elem->mutex);
14394 14354 }
14395 14355 }
14396 14356 rval = (elem->result);
14397 14357 mutex_exit(&elem->mutex);
14398 14358 mutex_destroy(&elem->mutex);
14399 14359 cv_destroy(&elem->cv);
14400 14360 kmem_free(elem, sizeof (struct fcp_hp_elem));
14401 14361 return (rval);
14402 14362 }
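
fcp_pass_to_hp_and_wait() hands an fcp_hp_elem carrying a mutex, a condition variable, and a wait flag to the hotplug taskq (via fcp_pass_to_hp below), then blocks in cv_wait() until the hotplug thread stores its result and clears the flag. The pthreads sketch below reproduces just that dispatch-and-wait handshake in user space; a thread stands in for the taskq and the field names are simplified.

    #include <pthread.h>
    #include <stdio.h>

    struct hp_elem {
        pthread_mutex_t mutex;
        pthread_cond_t  cv;
        int             wait;       /* 1 until the worker is done */
        int             result;
    };

    static void *
    hp_worker(void *arg)
    {
        struct hp_elem *elem = arg;

        /* ... perform the online/offline work here ... */
        (void) pthread_mutex_lock(&elem->mutex);
        elem->result = 0;           /* e.g. NDI_SUCCESS */
        elem->wait = 0;
        (void) pthread_cond_signal(&elem->cv);
        (void) pthread_mutex_unlock(&elem->mutex);
        return (NULL);
    }

    int
    main(void)
    {
        struct hp_elem elem = {
            PTHREAD_MUTEX_INITIALIZER, PTHREAD_COND_INITIALIZER, 1, -1
        };
        pthread_t tid;
        int rval;

        (void) pthread_create(&tid, NULL, hp_worker, &elem);

        (void) pthread_mutex_lock(&elem.mutex);
        while (elem.wait)
            (void) pthread_cond_wait(&elem.cv, &elem.mutex);
        rval = elem.result;
        (void) pthread_mutex_unlock(&elem.mutex);
        (void) pthread_join(tid, NULL);

        (void) printf("hotplug result: %d\n", rval);
        return (0);
    }
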
14403 14363
14404 14364 /*
14405 14365 * pass an element to the hotplug list, and then
14406 14366 * kick the hotplug thread
14407 14367 *
14408 14368 * return Boolean success, i.e. non-zero if all goes well, else zero on error
14409 14369 *
14410 14370 * acquires/releases the hotplug mutex
14411 14371 *
14412 14372 * called with the target mutex owned
14413 14373 *
14414 14374 * memory acquired in NOSLEEP mode
14415 14375	 * NOTE: if wait is set to 1 then the caller is responsible for waiting
14416 14376 * for the hp daemon to process the request and is responsible for
14417 14377 * freeing the element
14418 14378 */
14419 14379 static struct fcp_hp_elem *
14420 14380 fcp_pass_to_hp(struct fcp_port *pptr, struct fcp_lun *plun,
14421 14381 child_info_t *cip, int what, int link_cnt, int tgt_cnt, int flags, int wait)
14422 14382 {
14423 14383 struct fcp_hp_elem *elem;
14424 14384 dev_info_t *pdip;
14425 14385
14426 14386 ASSERT(pptr != NULL);
14427 14387 ASSERT(plun != NULL);
14428 14388 ASSERT(plun->lun_tgt != NULL);
14429 14389 ASSERT(mutex_owned(&plun->lun_tgt->tgt_mutex));
14430 14390
14431 14391 /* create space for a hotplug element */
14432 14392 if ((elem = kmem_zalloc(sizeof (struct fcp_hp_elem), KM_NOSLEEP))
14433 14393 == NULL) {
14434 14394 fcp_log(CE_WARN, NULL,
14435 14395 "!can't allocate memory for hotplug element");
14436 14396 return (NULL);
14437 14397 }
14438 14398
14439 14399 /* fill in hotplug element */
14440 14400 elem->port = pptr;
14441 14401 elem->lun = plun;
14442 14402 elem->cip = cip;
14443 14403 elem->old_lun_mpxio = plun->lun_mpxio;
14444 14404 elem->what = what;
14445 14405 elem->flags = flags;
14446 14406 elem->link_cnt = link_cnt;
14447 14407 elem->tgt_cnt = tgt_cnt;
14448 14408 elem->wait = wait;
14449 14409 mutex_init(&elem->mutex, NULL, MUTEX_DRIVER, NULL);
14450 14410 cv_init(&elem->cv, NULL, CV_DRIVER, NULL);
14451 14411
14452 14412 /* schedule the hotplug task */
14453 14413 pdip = pptr->port_dip;
14454 14414 mutex_enter(&plun->lun_mutex);
14455 14415 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14456 14416 plun->lun_event_count++;
14457 14417 elem->event_cnt = plun->lun_event_count;
14458 14418 }
14459 14419 mutex_exit(&plun->lun_mutex);
14460 14420 if (taskq_dispatch(DEVI(pdip)->devi_taskq, fcp_hp_task,
14461 14421 (void *)elem, KM_NOSLEEP) == NULL) {
14462 14422 mutex_enter(&plun->lun_mutex);
14463 14423 if (elem->what == FCP_ONLINE || elem->what == FCP_OFFLINE) {
14464 14424 plun->lun_event_count--;
14465 14425 }
14466 14426 mutex_exit(&plun->lun_mutex);
14467 14427 kmem_free(elem, sizeof (*elem));
14468 14428 return (0);
14469 14429 }
14470 14430
14471 14431 return (elem);
14472 14432 }
14473 14433
14474 14434
14475 14435 static void
14476 14436 fcp_retransport_cmd(struct fcp_port *pptr, struct fcp_pkt *cmd)
14477 14437 {
14478 14438 int rval;
14479 14439 struct scsi_address *ap;
14480 14440 struct fcp_lun *plun;
14481 14441 struct fcp_tgt *ptgt;
14482 14442 fc_packet_t *fpkt;
14483 14443
14484 14444 ap = &cmd->cmd_pkt->pkt_address;
14485 14445 plun = ADDR2LUN(ap);
14486 14446 ptgt = plun->lun_tgt;
14487 14447
14488 14448 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14489 14449
14490 14450 cmd->cmd_state = FCP_PKT_IDLE;
14491 14451
14492 14452 mutex_enter(&pptr->port_mutex);
14493 14453 mutex_enter(&ptgt->tgt_mutex);
14494 14454 if (((plun->lun_state & (FCP_LUN_BUSY | FCP_LUN_OFFLINE)) == 0) &&
14495 14455 (!(pptr->port_state & FCP_STATE_ONLINING))) {
14496 14456 fc_ulp_rscn_info_t *rscnp;
14497 14457
14498 14458 cmd->cmd_state = FCP_PKT_ISSUED;
14499 14459
14500 14460 /*
14501 14461 * It is possible for pkt_pd to be NULL if tgt_pd_handle was
14502 14462 * originally NULL, hence we try to set it to the pd pointed
14503 14463 * to by the SCSI device we're trying to get to.
14504 14464 */
14505 14465
14506 14466 fpkt = cmd->cmd_fp_pkt;
14507 14467 if ((fpkt->pkt_pd == NULL) && (ptgt->tgt_pd_handle != NULL)) {
14508 14468 fpkt->pkt_pd = ptgt->tgt_pd_handle;
14509 14469 /*
14510 14470 * We need to notify the transport that we now have a
14511 14471 * reference to the remote port handle.
14512 14472 */
14513 14473 fc_ulp_hold_remote_port(ptgt->tgt_pd_handle);
14514 14474 }
14515 14475
14516 14476 mutex_exit(&ptgt->tgt_mutex);
14517 14477 mutex_exit(&pptr->port_mutex);
14518 14478
14519 14479 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOINTR) == 0);
14520 14480
14521 14481 /* prepare the packet */
14522 14482
14523 14483 fcp_prepare_pkt(pptr, cmd, plun);
14524 14484
14525 14485 rscnp = (fc_ulp_rscn_info_t *)cmd->cmd_fp_pkt->
14526 14486 pkt_ulp_rscn_infop;
14527 14487
14528 14488 cmd->cmd_timeout = cmd->cmd_pkt->pkt_time ?
14529 14489 fcp_watchdog_time + cmd->cmd_pkt->pkt_time : 0;
14530 14490
14531 14491 if (rscnp != NULL) {
14532 14492 rscnp->ulp_rscn_count =
14533 14493 fc_ulp_get_rscn_count(pptr->
14534 14494 port_fp_handle);
14535 14495 }
14536 14496
14537 14497 rval = fcp_transport(pptr->port_fp_handle,
14538 14498 cmd->cmd_fp_pkt, 0);
14539 14499
14540 14500 if (rval == FC_SUCCESS) {
14541 14501 return;
14542 14502 }
14543 14503 cmd->cmd_state &= ~FCP_PKT_ISSUED;
14544 14504 } else {
14545 14505 mutex_exit(&ptgt->tgt_mutex);
14546 14506 mutex_exit(&pptr->port_mutex);
14547 14507 }
14548 14508
14549 14509 fcp_queue_pkt(pptr, cmd);
14550 14510 }
14551 14511
14552 14512
14553 14513 static void
14554 14514 fcp_fail_cmd(struct fcp_pkt *cmd, uchar_t reason, uint_t statistics)
14555 14515 {
14556 14516 ASSERT(cmd->cmd_flags & CFLAG_IN_QUEUE);
14557 14517
14558 14518 cmd->cmd_flags &= ~CFLAG_IN_QUEUE;
14559 14519 cmd->cmd_state = FCP_PKT_IDLE;
14560 14520
14561 14521 cmd->cmd_pkt->pkt_reason = reason;
14562 14522 cmd->cmd_pkt->pkt_state = 0;
14563 14523 cmd->cmd_pkt->pkt_statistics = statistics;
14564 14524
14565 14525 fcp_post_callback(cmd);
14566 14526 }
14567 14527
14568 14528 /*
14569 14529 * Function: fcp_queue_pkt
14570 14530 *
14571 14531 * Description: This function queues the packet passed by the caller into
14572 14532 * the list of packets of the FCP port.
14573 14533 *
14574 14534 * Argument: *pptr FCP port.
14575 14535 * *cmd FCP packet to queue.
14576 14536 *
14577 14537 * Return Value: None
14578 14538 *
14579 14539 * Context: User, Kernel and Interrupt context.
14580 14540 */
14581 14541 static void
14582 14542 fcp_queue_pkt(struct fcp_port *pptr, struct fcp_pkt *cmd)
14583 14543 {
14584 14544 ASSERT((cmd->cmd_pkt->pkt_flags & FLAG_NOQUEUE) == NULL);
14585 14545
14586 14546 mutex_enter(&pptr->port_pkt_mutex);
14587 14547 cmd->cmd_flags |= CFLAG_IN_QUEUE;
14588 14548 ASSERT(cmd->cmd_state != FCP_PKT_ISSUED);
14589 14549 cmd->cmd_timeout = fcp_watchdog_time + FCP_QUEUE_DELAY;
14590 14550
14591 14551 /*
14592 14552	 * zero pkt_time means hang around forever
14593 14553 */
14594 14554 if (cmd->cmd_pkt->pkt_time) {
14595 14555 if (cmd->cmd_fp_pkt->pkt_timeout > FCP_QUEUE_DELAY) {
14596 14556 cmd->cmd_fp_pkt->pkt_timeout -= FCP_QUEUE_DELAY;
14597 14557 } else {
14598 14558 /*
14599 14559 * Indicate the watch thread to fail the
14600 14560 * command by setting it to highest value
14601 14561 */
14602 14562 cmd->cmd_timeout = fcp_watchdog_time;
14603 14563 cmd->cmd_fp_pkt->pkt_timeout = FCP_INVALID_TIMEOUT;
14604 14564 }
14605 14565 }
14606 14566
14607 14567 if (pptr->port_pkt_head) {
14608 14568 ASSERT(pptr->port_pkt_tail != NULL);
14609 14569
14610 14570 pptr->port_pkt_tail->cmd_next = cmd;
14611 14571 pptr->port_pkt_tail = cmd;
14612 14572 } else {
14613 14573 ASSERT(pptr->port_pkt_tail == NULL);
14614 14574
14615 14575 pptr->port_pkt_head = pptr->port_pkt_tail = cmd;
14616 14576 }
14617 14577 cmd->cmd_next = NULL;
14618 14578 mutex_exit(&pptr->port_pkt_mutex);
14619 14579 }
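
fcp_queue_pkt() charges FCP_QUEUE_DELAY seconds against the packet's remaining transport timeout on every requeue; once that budget is exhausted it marks the packet with FCP_INVALID_TIMEOUT so fcp_watch() fails it instead of retrying forever. A small stand-alone illustration of that bookkeeping, with illustrative constants rather than the driver's values:

    #include <stdio.h>
    #include <limits.h>

    #define QUEUE_DELAY     4        /* seconds charged per requeue */
    #define INVALID_TIMEOUT INT_MAX  /* "fail me" marker for the watchdog */

    int
    main(void)
    {
        int pkt_timeout = 10;   /* remaining transport timeout, seconds */
        int i;

        for (i = 1; i <= 4; i++) {
            if (pkt_timeout > QUEUE_DELAY) {
                pkt_timeout -= QUEUE_DELAY;
                (void) printf("requeue %d: %d seconds left\n",
                    i, pkt_timeout);
            } else {
                pkt_timeout = INVALID_TIMEOUT;
                (void) printf("requeue %d: marked invalid, the "
                    "watch thread will fail the command\n", i);
                break;
            }
        }
        return (0);
    }
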
14620 14580
14621 14581 /*
14622 14582 * Function: fcp_update_targets
14623 14583 *
14624 14584 * Description: This function applies the specified change of state to all
14625 14585 * the targets listed. The operation applied is 'set'.
14626 14586 *
14627 14587 * Argument: *pptr FCP port.
14628 14588 * *dev_list Array of fc_portmap_t structures.
14629 14589 * count Length of dev_list.
14630 14590 * state State bits to update.
14631 14591 * cause Reason for the update.
14632 14592 *
14633 14593 * Return Value: None
14634 14594 *
14635 14595 * Context: User, Kernel and Interrupt context.
14636 14596 * The mutex pptr->port_mutex must be held.
14637 14597 */
14638 14598 static void
14639 14599 fcp_update_targets(struct fcp_port *pptr, fc_portmap_t *dev_list,
14640 14600 uint32_t count, uint32_t state, int cause)
14641 14601 {
14642 14602 fc_portmap_t *map_entry;
14643 14603 struct fcp_tgt *ptgt;
14644 14604
14645 14605 ASSERT(MUTEX_HELD(&pptr->port_mutex));
14646 14606
14647 14607 while (count--) {
14648 14608 map_entry = &(dev_list[count]);
14649 14609 ptgt = fcp_lookup_target(pptr,
14650 14610 (uchar_t *)&(map_entry->map_pwwn));
14651 14611 if (ptgt == NULL) {
14652 14612 continue;
14653 14613 }
14654 14614
14655 14615 mutex_enter(&ptgt->tgt_mutex);
14656 14616 ptgt->tgt_trace = 0;
14657 14617 ptgt->tgt_change_cnt++;
14658 14618 ptgt->tgt_statec_cause = cause;
14659 14619 ptgt->tgt_tmp_cnt = 1;
14660 14620 fcp_update_tgt_state(ptgt, FCP_SET, state);
14661 14621 mutex_exit(&ptgt->tgt_mutex);
14662 14622 }
14663 14623 }
14664 14624
14665 14625 static int
14666 14626 fcp_call_finish_init(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14667 14627 int lcount, int tcount, int cause)
14668 14628 {
14669 14629 int rval;
14670 14630
14671 14631 mutex_enter(&pptr->port_mutex);
14672 14632 rval = fcp_call_finish_init_held(pptr, ptgt, lcount, tcount, cause);
14673 14633 mutex_exit(&pptr->port_mutex);
14674 14634
14675 14635 return (rval);
14676 14636 }
14677 14637
14678 14638
14679 14639 static int
14680 14640 fcp_call_finish_init_held(struct fcp_port *pptr, struct fcp_tgt *ptgt,
14681 14641 int lcount, int tcount, int cause)
14682 14642 {
14683 14643 int finish_init = 0;
14684 14644 int finish_tgt = 0;
14685 14645 int do_finish_init = 0;
14686 14646 int rval = FCP_NO_CHANGE;
14687 14647
14688 14648 if (cause == FCP_CAUSE_LINK_CHANGE ||
14689 14649 cause == FCP_CAUSE_LINK_DOWN) {
14690 14650 do_finish_init = 1;
14691 14651 }
14692 14652
14693 14653 if (ptgt != NULL) {
14694 14654 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14695 14655 FCP_BUF_LEVEL_2, 0,
14696 14656 "link_cnt: %d,%d; tgt_cnt: %d,%d; tmp_cnt: %d,%d;"
14697 14657 " cause = %d, d_id = 0x%x, tgt_done = %d",
14698 14658 pptr->port_link_cnt, lcount, ptgt->tgt_change_cnt, tcount,
14699 14659 pptr->port_tmp_cnt, ptgt->tgt_tmp_cnt, cause,
14700 14660 ptgt->tgt_d_id, ptgt->tgt_done);
14701 14661
14702 14662 mutex_enter(&ptgt->tgt_mutex);
14703 14663
14704 14664 if (tcount && (ptgt->tgt_change_cnt != tcount)) {
14705 14665 rval = FCP_DEV_CHANGE;
14706 14666 if (do_finish_init && ptgt->tgt_done == 0) {
14707 14667 ptgt->tgt_done++;
14708 14668 finish_init = 1;
14709 14669 }
14710 14670 } else {
14711 14671 if (--ptgt->tgt_tmp_cnt <= 0) {
14712 14672 ptgt->tgt_tmp_cnt = 0;
14713 14673 finish_tgt = 1;
14714 14674
14715 14675 if (do_finish_init) {
14716 14676 finish_init = 1;
14717 14677 }
14718 14678 }
14719 14679 }
14720 14680 mutex_exit(&ptgt->tgt_mutex);
14721 14681 } else {
14722 14682 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
14723 14683 FCP_BUF_LEVEL_2, 0,
14724 14684 "Call Finish Init for NO target");
14725 14685
14726 14686 if (do_finish_init) {
14727 14687 finish_init = 1;
14728 14688 }
14729 14689 }
14730 14690
14731 14691 if (finish_tgt) {
14732 14692 ASSERT(ptgt != NULL);
14733 14693
14734 14694 mutex_enter(&ptgt->tgt_mutex);
14735 14695 #ifdef DEBUG
14736 14696 bzero(ptgt->tgt_tmp_cnt_stack,
14737 14697 sizeof (ptgt->tgt_tmp_cnt_stack));
14738 14698
14739 14699 ptgt->tgt_tmp_cnt_depth = getpcstack(ptgt->tgt_tmp_cnt_stack,
14740 14700 FCP_STACK_DEPTH);
14741 14701 #endif /* DEBUG */
14742 14702 mutex_exit(&ptgt->tgt_mutex);
14743 14703
14744 14704 (void) fcp_finish_tgt(pptr, ptgt, lcount, tcount, cause);
14745 14705 }
14746 14706
14747 14707 if (finish_init && lcount == pptr->port_link_cnt) {
14748 14708 ASSERT(pptr->port_tmp_cnt > 0);
14749 14709 if (--pptr->port_tmp_cnt == 0) {
14750 14710 fcp_finish_init(pptr);
14751 14711 }
14752 14712 } else if (lcount != pptr->port_link_cnt) {
14753 14713 FCP_TRACE(fcp_logq, pptr->port_instbuf,
14754 14714 fcp_trace, FCP_BUF_LEVEL_2, 0,
14755 14715 "fcp_call_finish_init_held,1: state change occured"
14756 14716 " for D_ID=0x%x", (ptgt) ? ptgt->tgt_d_id : 0);
14757 14717 }
14758 14718
14759 14719 return (rval);
14760 14720 }
14761 14721
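/*
 * Function:    fcp_reconfigure_luns
 *
 * Description: timeout(9F) callback (armed through tgt_tid) that rebuilds the
 *		LUN inventory of a target.  It constructs a single-entry
 *		portmap tagged PORT_DEVICE_REPORTLUN_CHANGED and feeds it back
 *		through fcp_statec_callback() so the normal discovery path
 *		re-issues REPORT LUNS for the target.  If tgt_tid was already
 *		cleared (the timer was canceled too late), it simply returns.
 */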
14762 14722 static void
14763 14723 fcp_reconfigure_luns(void * tgt_handle)
14764 14724 {
14765 14725 uint32_t dev_cnt;
14766 14726 fc_portmap_t *devlist;
14767 14727 struct fcp_tgt *ptgt = (struct fcp_tgt *)tgt_handle;
14768 14728 struct fcp_port *pptr = ptgt->tgt_port;
14769 14729
14770 14730 /*
14771 14731 * If the timer that fires this off got canceled too late, the
14772 14732 * target could have been destroyed.
14773 14733 */
14774 14734
14775 14735 if (ptgt->tgt_tid == NULL) {
14776 14736 return;
14777 14737 }
14778 14738
14779 14739 devlist = kmem_zalloc(sizeof (*devlist), KM_NOSLEEP);
14780 14740 if (devlist == NULL) {
14781 14741 fcp_log(CE_WARN, pptr->port_dip,
14782 14742 "!fcp%d: failed to allocate for portmap",
14783 14743 pptr->port_instance);
14784 14744 return;
14785 14745 }
14786 14746
14787 14747 dev_cnt = 1;
14788 14748 devlist->map_pd = ptgt->tgt_pd_handle;
14789 14749 devlist->map_hard_addr.hard_addr = ptgt->tgt_hard_addr;
14790 14750 devlist->map_did.port_id = ptgt->tgt_d_id;
14791 14751
14792 14752 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0], &devlist->map_nwwn, FC_WWN_SIZE);
14793 14753 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0], &devlist->map_pwwn, FC_WWN_SIZE);
14794 14754
14795 14755 devlist->map_state = PORT_DEVICE_LOGGED_IN;
14796 14756 devlist->map_type = PORT_DEVICE_REPORTLUN_CHANGED;
14797 14757 devlist->map_flags = 0;
14798 14758
14799 14759 fcp_statec_callback(NULL, pptr->port_fp_handle, FC_STATE_DEVICE_CHANGE,
14800 14760 pptr->port_topology, devlist, dev_cnt, pptr->port_id);
14801 14761
14802 14762 /*
14803 14763 	 * Clear tgt_tid now that there are no more references to
14804 14764 	 * the fcp_tgt.
14805 14765 */
14806 14766 mutex_enter(&ptgt->tgt_mutex);
14807 14767 ptgt->tgt_tid = NULL;
14808 14768 mutex_exit(&ptgt->tgt_mutex);
14809 14769
14810 14770 kmem_free(devlist, sizeof (*devlist));
14811 14771 }
14812 14772
14813 14773
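/*
 * Function:    fcp_free_targets
 *
 * Description: Walks the port's target hash table under port_mutex and frees
 *		every target through fcp_free_target().
 */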
14814 14774 static void
14815 14775 fcp_free_targets(struct fcp_port *pptr)
14816 14776 {
14817 14777 int i;
14818 14778 struct fcp_tgt *ptgt;
14819 14779
14820 14780 mutex_enter(&pptr->port_mutex);
14821 14781 for (i = 0; i < FCP_NUM_HASH; i++) {
14822 14782 ptgt = pptr->port_tgt_hash_table[i];
14823 14783 while (ptgt != NULL) {
14824 14784 struct fcp_tgt *next_tgt = ptgt->tgt_next;
14825 14785
14826 14786 fcp_free_target(ptgt);
14827 14787 ptgt = next_tgt;
14828 14788 }
14829 14789 }
14830 14790 mutex_exit(&pptr->port_mutex);
14831 14791 }
14832 14792
14833 14793
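/*
 * Function:    fcp_free_target
 *
 * Description: Frees one target and all of its LUNs.  Any pending
 *		reconfiguration timeout is canceled first; tgt_tid is cleared
 *		before calling untimeout() so that a callback which already
 *		fired simply returns (see fcp_reconfigure_luns()).
 */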
14834 14794 static void
14835 14795 fcp_free_target(struct fcp_tgt *ptgt)
14836 14796 {
14837 14797 struct fcp_lun *plun;
14838 14798 timeout_id_t tid;
14839 14799
14840 14800 mutex_enter(&ptgt->tgt_mutex);
14841 14801 tid = ptgt->tgt_tid;
14842 14802
14843 14803 /*
14844 14804 * Cancel any pending timeouts for this target.
14845 14805 */
14846 14806
14847 14807 if (tid != NULL) {
14848 14808 /*
14849 14809 * Set tgt_tid to NULL first to avoid a race in the callback.
14850 14810 * If tgt_tid is NULL, the callback will simply return.
14851 14811 */
14852 14812 ptgt->tgt_tid = NULL;
14853 14813 mutex_exit(&ptgt->tgt_mutex);
14854 14814 (void) untimeout(tid);
14855 14815 mutex_enter(&ptgt->tgt_mutex);
14856 14816 }
14857 14817
14858 14818 plun = ptgt->tgt_lun;
14859 14819 while (plun != NULL) {
14860 14820 struct fcp_lun *next_lun = plun->lun_next;
14861 14821
14862 14822 fcp_dealloc_lun(plun);
14863 14823 plun = next_lun;
14864 14824 }
14865 14825
14866 14826 mutex_exit(&ptgt->tgt_mutex);
14867 14827 fcp_dealloc_tgt(ptgt);
14868 14828 }
14869 14829
14870 14830 /*
14871 14831 * Function: fcp_is_retryable
14872 14832 *
14873 14833 * Description: Indicates if the internal packet is retryable.
14874 14834 *
14875 14835 * Argument: *icmd FCP internal packet.
14876 14836 *
14877 14837 * Return Value: 0 Not retryable
14878 14838 * 1 Retryable
14879 14839 *
14880 14840 * Context: User, Kernel and Interrupt context
14881 14841 */
14882 14842 static int
14883 14843 fcp_is_retryable(struct fcp_ipkt *icmd)
14884 14844 {
14885 14845 if (icmd->ipkt_port->port_state & (FCP_STATE_SUSPENDED |
14886 14846 FCP_STATE_DETACHING | FCP_STATE_POWER_DOWN)) {
14887 14847 return (0);
14888 14848 }
14889 14849
14890 14850 return (((fcp_watchdog_time + icmd->ipkt_fpkt->pkt_timeout) <
14891 14851 icmd->ipkt_port->port_deadline) ? 1 : 0);
14892 14852 }
14893 14853
14894 14854 /*
14895 14855 * Function: fcp_create_on_demand
14896 14856 *
14897 14857 * Argument: *pptr FCP port.
14898 14858 * *pwwn Port WWN.
14899 14859 *
14900 14860 * Return Value: 0 Success
14901 14861 * EIO
14902 14862 * ENOMEM
14903 14863 * EBUSY
14904 14864 * EINVAL
14905 14865 *
14906 14866 * Context: User and Kernel context
14907 14867 */
14908 14868 static int
14909 14869 fcp_create_on_demand(struct fcp_port *pptr, uchar_t *pwwn)
14910 14870 {
14911 14871 int wait_ms;
14912 14872 int tcount;
14913 14873 int lcount;
14914 14874 int ret;
14915 14875 int error;
14916 14876 int rval = EIO;
14917 14877 int ntries;
14918 14878 fc_portmap_t *devlist;
14919 14879 opaque_t pd;
14920 14880 struct fcp_lun *plun;
14921 14881 struct fcp_tgt *ptgt;
14922 14882 int old_manual = 0;
14923 14883
14924 14884 /* Allocates the fc_portmap_t structure. */
14925 14885 devlist = kmem_zalloc(sizeof (*devlist), KM_SLEEP);
14926 14886
14927 14887 /*
14928 14888 * If FC_INVALID_RSCN_COUNT is non-zero, we will have to init as shown
14929 14889 * in the commented statement below:
14930 14890 *
14931 14891 * devlist->map_rscn_info.ulp_rscn_count = FC_INVALID_RSCN_COUNT;
14932 14892 *
14933 14893 * Below, the deadline for the discovery process is set.
14934 14894 */
14935 14895 mutex_enter(&pptr->port_mutex);
14936 14896 pptr->port_deadline = fcp_watchdog_time + FCP_ICMD_DEADLINE;
14937 14897 mutex_exit(&pptr->port_mutex);
14938 14898
14939 14899 /*
14940 14900 * We try to find the remote port based on the WWN provided by the
14941 14901 * caller. We actually ask fp/fctl if it has it.
14942 14902 */
14943 14903 pd = fc_ulp_get_remote_port(pptr->port_fp_handle,
14944 14904 (la_wwn_t *)pwwn, &error, 1);
14945 14905
14946 14906 if (pd == NULL) {
14947 14907 kmem_free(devlist, sizeof (*devlist));
14948 14908 return (rval);
14949 14909 }
14950 14910
14951 14911 /*
14952 14912 * The remote port was found. We ask fp/fctl to update our
14953 14913 * fc_portmap_t structure.
14954 14914 */
14955 14915 ret = fc_ulp_pwwn_to_portmap(pptr->port_fp_handle,
14956 14916 (la_wwn_t *)pwwn, devlist);
14957 14917 if (ret != FC_SUCCESS) {
14958 14918 kmem_free(devlist, sizeof (*devlist));
14959 14919 return (rval);
14960 14920 }
14961 14921
14962 14922 /*
14963 14923 	 * The map type field is set to indicate that the creation is being
14964 14924 	 * done at the user's request (ioctl, probably luxadm or cfgadm).
14965 14925 */
14966 14926 devlist->map_type = PORT_DEVICE_USER_CREATE;
14967 14927
14968 14928 mutex_enter(&pptr->port_mutex);
14969 14929
14970 14930 /*
14971 14931 * We check to see if fcp already has a target that describes the
14972 14932 * device being created. If not it is created.
14973 14933 */
14974 14934 ptgt = fcp_lookup_target(pptr, pwwn);
14975 14935 if (ptgt == NULL) {
14976 14936 lcount = pptr->port_link_cnt;
14977 14937 mutex_exit(&pptr->port_mutex);
14978 14938
14979 14939 ptgt = fcp_alloc_tgt(pptr, devlist, lcount);
14980 14940 if (ptgt == NULL) {
14981 14941 fcp_log(CE_WARN, pptr->port_dip,
14982 14942 "!FC target allocation failed");
14983 14943 return (ENOMEM);
14984 14944 }
14985 14945
14986 14946 mutex_enter(&pptr->port_mutex);
14987 14947 }
14988 14948
14989 14949 mutex_enter(&ptgt->tgt_mutex);
14990 14950 ptgt->tgt_statec_cause = FCP_CAUSE_USER_CREATE;
14991 14951 ptgt->tgt_tmp_cnt = 1;
14992 14952 ptgt->tgt_device_created = 0;
14993 14953 /*
14994 14954 	 * If this is a fabric topology with auto configuration enabled,
14995 14955 	 * but the target was manually unconfigured, reset
14996 14956 	 * manual_config_only to 0 so the device will get configured.
14997 14957 */
14998 14958 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
14999 14959 fcp_enable_auto_configuration &&
15000 14960 ptgt->tgt_manual_config_only == 1) {
15001 14961 old_manual = 1;
15002 14962 ptgt->tgt_manual_config_only = 0;
15003 14963 }
15004 14964 mutex_exit(&ptgt->tgt_mutex);
15005 14965
15006 14966 fcp_update_targets(pptr, devlist, 1,
15007 14967 FCP_LUN_BUSY | FCP_LUN_MARK, FCP_CAUSE_USER_CREATE);
15008 14968
15009 14969 lcount = pptr->port_link_cnt;
15010 14970 tcount = ptgt->tgt_change_cnt;
15011 14971
15012 14972 if (fcp_handle_mapflags(pptr, ptgt, devlist, lcount,
15013 14973 tcount, FCP_CAUSE_USER_CREATE) == TRUE) {
15014 14974 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15015 14975 fcp_enable_auto_configuration && old_manual) {
15016 14976 mutex_enter(&ptgt->tgt_mutex);
15017 14977 ptgt->tgt_manual_config_only = 1;
15018 14978 mutex_exit(&ptgt->tgt_mutex);
15019 14979 }
15020 14980
15021 14981 if (pptr->port_link_cnt != lcount ||
15022 14982 ptgt->tgt_change_cnt != tcount) {
15023 14983 rval = EBUSY;
15024 14984 }
15025 14985 mutex_exit(&pptr->port_mutex);
15026 14986
15027 14987 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15028 14988 FCP_BUF_LEVEL_3, 0,
15029 14989 "fcp_create_on_demand: mapflags ptgt=%x, "
15030 14990 "lcount=%x::port_link_cnt=%x, "
15031 14991 "tcount=%x: tgt_change_cnt=%x, rval=%x",
15032 14992 ptgt, lcount, pptr->port_link_cnt,
15033 14993 tcount, ptgt->tgt_change_cnt, rval);
15034 14994 return (rval);
15035 14995 }
15036 14996
15037 14997 /*
15038 14998 	 * Due to the lack of synchronization mechanisms, we perform
15039 14999 	 * periodic monitoring of our request. Because requests
15040 15000 	 * get dropped when another one supersedes them (either because
15041 15001 	 * of a link change or a target change), it is difficult to
15042 15002 	 * provide a clean synchronization mechanism (such as a
15043 15003 	 * semaphore or a condition variable) without exhaustively
15044 15004 * rewriting the mainline discovery code of this driver.
15045 15005 */
15046 15006 wait_ms = 500;
15047 15007
15048 15008 ntries = fcp_max_target_retries;
15049 15009
15050 15010 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15051 15011 FCP_BUF_LEVEL_3, 0,
15052 15012 "fcp_create_on_demand(1): ntries=%x, ptgt=%x, "
15053 15013 "lcount=%x::port_link_cnt=%x, "
15054 15014 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15055 15015 "tgt_tmp_cnt =%x",
15056 15016 ntries, ptgt, lcount, pptr->port_link_cnt,
15057 15017 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15058 15018 ptgt->tgt_tmp_cnt);
15059 15019
15060 15020 mutex_enter(&ptgt->tgt_mutex);
15061 15021 while (ntries-- != 0 && pptr->port_link_cnt == lcount &&
15062 15022 ptgt->tgt_change_cnt == tcount && ptgt->tgt_device_created == 0) {
15063 15023 mutex_exit(&ptgt->tgt_mutex);
15064 15024 mutex_exit(&pptr->port_mutex);
15065 15025
15066 15026 delay(drv_usectohz(wait_ms * 1000));
15067 15027
15068 15028 mutex_enter(&pptr->port_mutex);
15069 15029 mutex_enter(&ptgt->tgt_mutex);
15070 15030 }
15071 15031
15072 15032
15073 15033 if (pptr->port_link_cnt != lcount || ptgt->tgt_change_cnt != tcount) {
15074 15034 rval = EBUSY;
15075 15035 } else {
15076 15036 if (ptgt->tgt_tmp_cnt == 0 && ptgt->tgt_node_state ==
15077 15037 FCP_TGT_NODE_PRESENT) {
15078 15038 rval = 0;
15079 15039 }
15080 15040 }
15081 15041
15082 15042 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15083 15043 FCP_BUF_LEVEL_3, 0,
15084 15044 "fcp_create_on_demand(2): ntries=%x, ptgt=%x, "
15085 15045 "lcount=%x::port_link_cnt=%x, "
15086 15046 "tcount=%x::tgt_change_cnt=%x, rval=%x, tgt_device_created=%x "
15087 15047 "tgt_tmp_cnt =%x",
15088 15048 ntries, ptgt, lcount, pptr->port_link_cnt,
15089 15049 tcount, ptgt->tgt_change_cnt, rval, ptgt->tgt_device_created,
15090 15050 ptgt->tgt_tmp_cnt);
15091 15051
15092 15052 if (rval) {
15093 15053 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15094 15054 fcp_enable_auto_configuration && old_manual) {
15095 15055 ptgt->tgt_manual_config_only = 1;
15096 15056 }
15097 15057 mutex_exit(&ptgt->tgt_mutex);
15098 15058 mutex_exit(&pptr->port_mutex);
15099 15059 kmem_free(devlist, sizeof (*devlist));
15100 15060
15101 15061 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15102 15062 FCP_BUF_LEVEL_3, 0,
15103 15063 "fcp_create_on_demand(3): ntries=%x, ptgt=%x, "
15104 15064 "lcount=%x::port_link_cnt=%x, "
15105 15065 "tcount=%x::tgt_change_cnt=%x, rval=%x, "
15106 15066 "tgt_device_created=%x, tgt D_ID=%x",
15107 15067 ntries, ptgt, lcount, pptr->port_link_cnt,
15108 15068 tcount, ptgt->tgt_change_cnt, rval,
15109 15069 ptgt->tgt_device_created, ptgt->tgt_d_id);
15110 15070 return (rval);
15111 15071 }
15112 15072
15113 15073 if ((plun = ptgt->tgt_lun) != NULL) {
15114 15074 tcount = plun->lun_tgt->tgt_change_cnt;
15115 15075 } else {
15116 15076 rval = EINVAL;
15117 15077 }
15118 15078 lcount = pptr->port_link_cnt;
15119 15079
15120 15080 /*
15121 15081 * Configuring the target with no LUNs will fail. We
15122 15082 * should reset the node state so that it is not
15123 15083 * automatically configured when the LUNs are added
15124 15084 * to this target.
15125 15085 */
15126 15086 if (ptgt->tgt_lun_cnt == 0) {
15127 15087 ptgt->tgt_node_state = FCP_TGT_NODE_NONE;
15128 15088 }
15129 15089 mutex_exit(&ptgt->tgt_mutex);
15130 15090 mutex_exit(&pptr->port_mutex);
15131 15091
15132 15092 while (plun) {
15133 15093 child_info_t *cip;
15134 15094
15135 15095 mutex_enter(&plun->lun_mutex);
15136 15096 cip = plun->lun_cip;
15137 15097 mutex_exit(&plun->lun_mutex);
15138 15098
15139 15099 mutex_enter(&ptgt->tgt_mutex);
15140 15100 if (!(plun->lun_state & FCP_LUN_OFFLINE)) {
15141 15101 mutex_exit(&ptgt->tgt_mutex);
15142 15102
15143 15103 rval = fcp_pass_to_hp_and_wait(pptr, plun, cip,
15144 15104 FCP_ONLINE, lcount, tcount,
15145 15105 NDI_ONLINE_ATTACH);
15146 15106 if (rval != NDI_SUCCESS) {
15147 15107 FCP_TRACE(fcp_logq,
15148 15108 pptr->port_instbuf, fcp_trace,
15149 15109 FCP_BUF_LEVEL_3, 0,
15150 15110 "fcp_create_on_demand: "
15151 15111 "pass_to_hp_and_wait failed "
15152 15112 "rval=%x", rval);
15153 15113 rval = EIO;
15154 15114 } else {
15155 15115 mutex_enter(&LUN_TGT->tgt_mutex);
15156 15116 plun->lun_state &= ~(FCP_LUN_OFFLINE |
15157 15117 FCP_LUN_BUSY);
15158 15118 mutex_exit(&LUN_TGT->tgt_mutex);
15159 15119 }
15160 15120 mutex_enter(&ptgt->tgt_mutex);
15161 15121 }
15162 15122
15163 15123 plun = plun->lun_next;
15164 15124 mutex_exit(&ptgt->tgt_mutex);
15165 15125 }
15166 15126
15167 15127 kmem_free(devlist, sizeof (*devlist));
15168 15128
15169 15129 if (FC_TOP_EXTERNAL(pptr->port_topology) &&
15170 15130 fcp_enable_auto_configuration && old_manual) {
15171 15131 mutex_enter(&ptgt->tgt_mutex);
15172 15132 /* if successful then set manual to 0 */
15173 15133 if (rval == 0) {
15174 15134 ptgt->tgt_manual_config_only = 0;
15175 15135 } else {
15176 15136 /* reset to 1 so the user has to do the config */
15177 15137 ptgt->tgt_manual_config_only = 1;
15178 15138 }
15179 15139 mutex_exit(&ptgt->tgt_mutex);
15180 15140 }
15181 15141
15182 15142 return (rval);
15183 15143 }
15184 15144
15185 15145
15186 15146 static void
15187 15147 fcp_ascii_to_wwn(caddr_t string, uchar_t bytes[], unsigned int byte_len)
15188 15148 {
15189 15149 int count;
15190 15150 uchar_t byte;
15191 15151
15192 15152 count = 0;
15193 15153 while (*string) {
15194 15154 byte = FCP_ATOB(*string); string++;
15195 15155 byte = byte << 4 | FCP_ATOB(*string); string++;
15196 15156 bytes[count++] = byte;
15197 15157
15198 15158 if (count >= byte_len) {
15199 15159 break;
15200 15160 }
15201 15161 }
15202 15162 }
15203 15163
15204 15164 static void
15205 15165 fcp_wwn_to_ascii(uchar_t wwn[], char *string)
15206 15166 {
15207 15167 int i;
15208 15168
15209 15169 for (i = 0; i < FC_WWN_SIZE; i++) {
15210 15170 (void) sprintf(string + (i * 2),
15211 15171 "%02x", wwn[i]);
15212 15172 }
15213 15173
15214 15174 }
15215 15175
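/*
 * Function:    fcp_print_error
 *
 * Description: Logs a warning describing why an internal command (REPORT LUN,
 *		INQUIRY, INQUIRY-83, PLOGI or PRLI) failed.  When the packet
 *		completed at the transport level, the FCP response and SCSI
 *		sense data are decoded; otherwise the transport state and
 *		reason are reported.
 */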
15216 15176 static void
15217 15177 fcp_print_error(fc_packet_t *fpkt)
15218 15178 {
15219 15179 struct fcp_ipkt *icmd = (struct fcp_ipkt *)
15220 15180 fpkt->pkt_ulp_private;
15221 15181 struct fcp_port *pptr;
15222 15182 struct fcp_tgt *ptgt;
15223 15183 struct fcp_lun *plun;
15224 15184 caddr_t buf;
15225 15185 int scsi_cmd = 0;
15226 15186
15227 15187 ptgt = icmd->ipkt_tgt;
15228 15188 plun = icmd->ipkt_lun;
15229 15189 pptr = ptgt->tgt_port;
15230 15190
15231 15191 buf = kmem_zalloc(256, KM_NOSLEEP);
15232 15192 if (buf == NULL) {
15233 15193 return;
15234 15194 }
15235 15195
15236 15196 switch (icmd->ipkt_opcode) {
15237 15197 case SCMD_REPORT_LUN:
15238 15198 (void) sprintf(buf, "!REPORT LUN to D_ID=0x%%x"
15239 15199 " lun=0x%%x failed");
15240 15200 scsi_cmd++;
15241 15201 break;
15242 15202
15243 15203 case SCMD_INQUIRY_PAGE83:
15244 15204 (void) sprintf(buf, "!INQUIRY-83 to D_ID=0x%%x"
15245 15205 " lun=0x%%x failed");
15246 15206 scsi_cmd++;
15247 15207 break;
15248 15208
15249 15209 case SCMD_INQUIRY:
15250 15210 (void) sprintf(buf, "!INQUIRY to D_ID=0x%%x"
15251 15211 " lun=0x%%x failed");
15252 15212 scsi_cmd++;
15253 15213 break;
15254 15214
15255 15215 case LA_ELS_PLOGI:
15256 15216 (void) sprintf(buf, "!PLOGI to D_ID=0x%%x failed");
15257 15217 break;
15258 15218
15259 15219 case LA_ELS_PRLI:
15260 15220 (void) sprintf(buf, "!PRLI to D_ID=0x%%x failed");
15261 15221 break;
15262 15222 }
15263 15223
15264 15224 if (scsi_cmd && fpkt->pkt_state == FC_PKT_SUCCESS) {
15265 15225 struct fcp_rsp response, *rsp;
15266 15226 uchar_t asc, ascq;
15267 15227 caddr_t sense_key = NULL;
15268 15228 struct fcp_rsp_info fcp_rsp_err, *bep;
15269 15229
15270 15230 if (icmd->ipkt_nodma) {
15271 15231 rsp = (struct fcp_rsp *)fpkt->pkt_resp;
15272 15232 bep = (struct fcp_rsp_info *)((caddr_t)rsp +
15273 15233 sizeof (struct fcp_rsp));
15274 15234 } else {
15275 15235 rsp = &response;
15276 15236 bep = &fcp_rsp_err;
15277 15237
15278 15238 FCP_CP_IN(fpkt->pkt_resp, rsp, fpkt->pkt_resp_acc,
15279 15239 sizeof (struct fcp_rsp));
15280 15240
15281 15241 FCP_CP_IN(fpkt->pkt_resp + sizeof (struct fcp_rsp),
15282 15242 bep, fpkt->pkt_resp_acc,
15283 15243 sizeof (struct fcp_rsp_info));
15284 15244 }
15285 15245
15286 15246
15287 15247 if (fcp_validate_fcp_response(rsp, pptr) != FC_SUCCESS) {
15288 15248 (void) sprintf(buf + strlen(buf),
15289 15249 " : Bad FCP response values rsvd1=%%x, rsvd2=%%x,"
15290 15250 " sts-rsvd1=%%x, sts-rsvd2=%%x, rsplen=%%x,"
15291 15251 " senselen=%%x. Giving up");
15292 15252
15293 15253 fcp_log(CE_WARN, pptr->port_dip, buf,
15294 15254 ptgt->tgt_d_id, plun->lun_num, rsp->reserved_0,
15295 15255 rsp->reserved_1, rsp->fcp_u.fcp_status.reserved_0,
15296 15256 rsp->fcp_u.fcp_status.reserved_1,
15297 15257 rsp->fcp_response_len, rsp->fcp_sense_len);
15298 15258
15299 15259 kmem_free(buf, 256);
15300 15260 return;
15301 15261 }
15302 15262
15303 15263 if (rsp->fcp_u.fcp_status.rsp_len_set &&
15304 15264 bep->rsp_code != FCP_NO_FAILURE) {
15305 15265 (void) sprintf(buf + strlen(buf),
15306 15266 " FCP Response code = 0x%x", bep->rsp_code);
15307 15267 }
15308 15268
15309 15269 if (rsp->fcp_u.fcp_status.scsi_status & STATUS_CHECK) {
15310 15270 struct scsi_extended_sense sense_info, *sense_ptr;
15311 15271
15312 15272 if (icmd->ipkt_nodma) {
15313 15273 sense_ptr = (struct scsi_extended_sense *)
15314 15274 ((caddr_t)fpkt->pkt_resp +
15315 15275 sizeof (struct fcp_rsp) +
15316 15276 rsp->fcp_response_len);
15317 15277 } else {
15318 15278 sense_ptr = &sense_info;
15319 15279
15320 15280 FCP_CP_IN(fpkt->pkt_resp +
15321 15281 sizeof (struct fcp_rsp) +
15322 15282 rsp->fcp_response_len, &sense_info,
15323 15283 fpkt->pkt_resp_acc,
15324 15284 sizeof (struct scsi_extended_sense));
15325 15285 }
15326 15286
15327 15287 if (sense_ptr->es_key < NUM_SENSE_KEYS +
15328 15288 NUM_IMPL_SENSE_KEYS) {
15329 15289 sense_key = sense_keys[sense_ptr->es_key];
15330 15290 } else {
15331 15291 sense_key = "Undefined";
15332 15292 }
15333 15293
15334 15294 asc = sense_ptr->es_add_code;
15335 15295 ascq = sense_ptr->es_qual_code;
15336 15296
15337 15297 (void) sprintf(buf + strlen(buf),
15338 15298 ": sense key=%%s, ASC=%%x," " ASCQ=%%x."
15339 15299 " Giving up");
15340 15300
15341 15301 fcp_log(CE_WARN, pptr->port_dip, buf,
15342 15302 ptgt->tgt_d_id, plun->lun_num, sense_key,
15343 15303 asc, ascq);
15344 15304 } else {
15345 15305 (void) sprintf(buf + strlen(buf),
15346 15306 " : SCSI status=%%x. Giving up");
15347 15307
15348 15308 fcp_log(CE_WARN, pptr->port_dip, buf,
15349 15309 ptgt->tgt_d_id, plun->lun_num,
15350 15310 rsp->fcp_u.fcp_status.scsi_status);
15351 15311 }
15352 15312 } else {
15353 15313 caddr_t state, reason, action, expln;
15354 15314
15355 15315 (void) fc_ulp_pkt_error(fpkt, &state, &reason,
15356 15316 &action, &expln);
15357 15317
15358 15318 (void) sprintf(buf + strlen(buf), ": State:%%s,"
15359 15319 " Reason:%%s. Giving up");
15360 15320
15361 15321 if (scsi_cmd) {
15362 15322 fcp_log(CE_WARN, pptr->port_dip, buf,
15363 15323 ptgt->tgt_d_id, plun->lun_num, state, reason);
15364 15324 } else {
15365 15325 fcp_log(CE_WARN, pptr->port_dip, buf,
15366 15326 ptgt->tgt_d_id, state, reason);
15367 15327 }
15368 15328 }
15369 15329
15370 15330 kmem_free(buf, 256);
15371 15331 }
15372 15332
15373 15333
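/*
 * Function:    fcp_handle_ipkt_errors
 *
 * Description: Decides what to do with an internal packet that failed.  Busy
 *		and offline class errors are requeued through fcp_queue_ipkt()
 *		as long as retries and the port deadline allow; a PRLI that
 *		fails with FC_LOGINREQ is reissued as a PLOGI; anything else
 *		is logged unless the port or target state changed in the
 *		meantime.
 *
 * Return Value: DDI_SUCCESS if a retry was queued or issued,
 *		 DDI_FAILURE otherwise.
 */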
15374 15334 static int
15375 15335 fcp_handle_ipkt_errors(struct fcp_port *pptr, struct fcp_tgt *ptgt,
15376 15336 struct fcp_ipkt *icmd, int rval, caddr_t op)
15377 15337 {
15378 15338 int ret = DDI_FAILURE;
15379 15339 char *error;
15380 15340
15381 15341 switch (rval) {
15382 15342 case FC_DEVICE_BUSY_NEW_RSCN:
15383 15343 /*
15384 15344 * This means that there was a new RSCN that the transport
15385 15345 * knows about (which the ULP *may* know about too) but the
15386 15346 * pkt that was sent down was related to an older RSCN. So, we
15387 15347 * are just going to reset the retry count and deadline and
15388 15348 * continue to retry. The idea is that transport is currently
15389 15349 * working on the new RSCN and will soon let the ULPs know
15390 15350 * about it and when it does the existing logic will kick in
15391 15351 * where it will change the tcount to indicate that something
15392 15352 * changed on the target. So, rediscovery will start and there
15393 15353 * will not be an infinite retry.
15394 15354 *
15395 15355 * For a full flow of how the RSCN info is transferred back and
15396 15356 * forth, see fp.c
15397 15357 */
15398 15358 icmd->ipkt_retries = 0;
15399 15359 icmd->ipkt_port->port_deadline = fcp_watchdog_time +
15400 15360 FCP_ICMD_DEADLINE;
15401 15361
15402 15362 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15403 15363 FCP_BUF_LEVEL_3, 0,
15404 15364 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15405 15365 rval, ptgt->tgt_d_id);
15406 15366 /* FALLTHROUGH */
15407 15367
15408 15368 case FC_STATEC_BUSY:
15409 15369 case FC_DEVICE_BUSY:
15410 15370 case FC_PBUSY:
15411 15371 case FC_FBUSY:
15412 15372 case FC_TRAN_BUSY:
15413 15373 case FC_OFFLINE:
15414 15374 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15415 15375 FCP_BUF_LEVEL_3, 0,
15416 15376 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15417 15377 rval, ptgt->tgt_d_id);
15418 15378 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15419 15379 fcp_is_retryable(icmd)) {
15420 15380 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15421 15381 ret = DDI_SUCCESS;
15422 15382 }
15423 15383 break;
15424 15384
15425 15385 case FC_LOGINREQ:
15426 15386 /*
15427 15387 * FC_LOGINREQ used to be handled just like all the cases
15428 15388 		 * above. It has been changed to handle a PRLI that fails
15429 15389 		 * with FC_LOGINREQ differently from other ipkts that fail
15430 15390 		 * with FC_LOGINREQ. If a PRLI fails with FC_LOGINREQ, it is
15431 15391 * a simple matter to turn it into a PLOGI instead, so that's
15432 15392 * exactly what we do here.
15433 15393 */
15434 15394 if (icmd->ipkt_opcode == LA_ELS_PRLI) {
15435 15395 ret = fcp_send_els(icmd->ipkt_port, icmd->ipkt_tgt,
15436 15396 icmd, LA_ELS_PLOGI, icmd->ipkt_link_cnt,
15437 15397 icmd->ipkt_change_cnt, icmd->ipkt_cause);
15438 15398 } else {
15439 15399 FCP_TRACE(fcp_logq, pptr->port_instbuf, fcp_trace,
15440 15400 FCP_BUF_LEVEL_3, 0,
15441 15401 "fcp_handle_ipkt_errors: rval=%x for D_ID=%x",
15442 15402 rval, ptgt->tgt_d_id);
15443 15403 if (icmd->ipkt_retries < FCP_MAX_RETRIES &&
15444 15404 fcp_is_retryable(icmd)) {
15445 15405 fcp_queue_ipkt(pptr, icmd->ipkt_fpkt);
15446 15406 ret = DDI_SUCCESS;
15447 15407 }
15448 15408 }
15449 15409 break;
15450 15410
15451 15411 default:
15452 15412 mutex_enter(&pptr->port_mutex);
15453 15413 mutex_enter(&ptgt->tgt_mutex);
15454 15414 if (!FCP_STATE_CHANGED(pptr, ptgt, icmd)) {
15455 15415 mutex_exit(&ptgt->tgt_mutex);
15456 15416 mutex_exit(&pptr->port_mutex);
15457 15417
15458 15418 (void) fc_ulp_error(rval, &error);
15459 15419 fcp_log(CE_WARN, pptr->port_dip,
15460 15420 "!Failed to send %s to D_ID=%x error=%s",
15461 15421 op, ptgt->tgt_d_id, error);
15462 15422 } else {
15463 15423 FCP_TRACE(fcp_logq, pptr->port_instbuf,
15464 15424 fcp_trace, FCP_BUF_LEVEL_2, 0,
15465 15425 "fcp_handle_ipkt_errors,1: state change occured"
15466 15426 " for D_ID=0x%x", ptgt->tgt_d_id);
15467 15427 mutex_exit(&ptgt->tgt_mutex);
15468 15428 mutex_exit(&pptr->port_mutex);
15469 15429 }
15470 15430 break;
15471 15431 }
15472 15432
15473 15433 return (ret);
15474 15434 }
15475 15435
15476 15436
15477 15437 /*
15478 15438  * Check for outstanding commands on any LUN of this target.
15479 15439 */
15480 15440 static int
15481 15441 fcp_outstanding_lun_cmds(struct fcp_tgt *ptgt)
15482 15442 {
15483 15443 struct fcp_lun *plun;
15484 15444 struct fcp_pkt *cmd;
15485 15445
15486 15446 for (plun = ptgt->tgt_lun; plun != NULL; plun = plun->lun_next) {
15487 15447 mutex_enter(&plun->lun_mutex);
15488 15448 for (cmd = plun->lun_pkt_head; cmd != NULL;
15489 15449 cmd = cmd->cmd_forw) {
15490 15450 if (cmd->cmd_state == FCP_PKT_ISSUED) {
15491 15451 mutex_exit(&plun->lun_mutex);
15492 15452 return (FC_SUCCESS);
15493 15453 }
15494 15454 }
15495 15455 mutex_exit(&plun->lun_mutex);
15496 15456 }
15497 15457
15498 15458 return (FC_FAILURE);
15499 15459 }
15500 15460
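/*
 * Function:    fcp_construct_map
 *
 * Description: Allocates and fills a portmap array with one entry for every
 *		non-orphan target on the port.  Entries that fp/fctl cannot
 *		resolve are filled from the cached target information and
 *		marked PORT_DEVICE_OLD/PORT_DEVICE_INVALID.  The caller is
 *		responsible for freeing the array
 *		(sizeof (fc_portmap_t) * *dev_cnt bytes).
 */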
15501 15461 static fc_portmap_t *
15502 15462 fcp_construct_map(struct fcp_port *pptr, uint32_t *dev_cnt)
15503 15463 {
15504 15464 int i;
15505 15465 fc_portmap_t *devlist;
15506 15466 fc_portmap_t *devptr = NULL;
15507 15467 struct fcp_tgt *ptgt;
15508 15468
15509 15469 mutex_enter(&pptr->port_mutex);
15510 15470 for (i = 0, *dev_cnt = 0; i < FCP_NUM_HASH; i++) {
15511 15471 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15512 15472 ptgt = ptgt->tgt_next) {
15513 15473 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15514 15474 ++*dev_cnt;
15515 15475 }
15516 15476 }
15517 15477 }
15518 15478
15519 15479 devptr = devlist = kmem_zalloc(sizeof (*devlist) * *dev_cnt,
15520 15480 KM_NOSLEEP);
15521 15481 if (devlist == NULL) {
15522 15482 mutex_exit(&pptr->port_mutex);
15523 15483 fcp_log(CE_WARN, pptr->port_dip,
15524 15484 "!fcp%d: failed to allocate for portmap for construct map",
15525 15485 pptr->port_instance);
15526 15486 return (devptr);
15527 15487 }
15528 15488
15529 15489 for (i = 0; i < FCP_NUM_HASH; i++) {
15530 15490 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15531 15491 ptgt = ptgt->tgt_next) {
15532 15492 if (!(ptgt->tgt_state & FCP_TGT_ORPHAN)) {
15533 15493 int ret;
15534 15494
15535 15495 ret = fc_ulp_pwwn_to_portmap(
15536 15496 pptr->port_fp_handle,
15537 15497 (la_wwn_t *)&ptgt->tgt_port_wwn.raw_wwn[0],
15538 15498 devlist);
15539 15499
15540 15500 if (ret == FC_SUCCESS) {
15541 15501 devlist++;
15542 15502 continue;
15543 15503 }
15544 15504
15545 15505 devlist->map_pd = NULL;
15546 15506 devlist->map_did.port_id = ptgt->tgt_d_id;
15547 15507 devlist->map_hard_addr.hard_addr =
15548 15508 ptgt->tgt_hard_addr;
15549 15509
15550 15510 devlist->map_state = PORT_DEVICE_INVALID;
15551 15511 devlist->map_type = PORT_DEVICE_OLD;
15552 15512
15553 15513 bcopy(&ptgt->tgt_node_wwn.raw_wwn[0],
15554 15514 &devlist->map_nwwn, FC_WWN_SIZE);
15555 15515
15556 15516 bcopy(&ptgt->tgt_port_wwn.raw_wwn[0],
15557 15517 &devlist->map_pwwn, FC_WWN_SIZE);
15558 15518
15559 15519 devlist++;
15560 15520 }
15561 15521 }
15562 15522 }
15563 15523
15564 15524 mutex_exit(&pptr->port_mutex);
15565 15525
15566 15526 return (devptr);
15567 15527 }
15568 15528 /*
15569 15529  * Inform MPxIO that the LUN is busy and cannot accept regular I/O.
15570 15530 */
15571 15531 static void
15572 15532 fcp_update_mpxio_path_verifybusy(struct fcp_port *pptr)
15573 15533 {
15574 15534 int i;
15575 15535 struct fcp_tgt *ptgt;
15576 15536 struct fcp_lun *plun;
15577 15537
15578 15538 for (i = 0; i < FCP_NUM_HASH; i++) {
15579 15539 for (ptgt = pptr->port_tgt_hash_table[i]; ptgt != NULL;
15580 15540 ptgt = ptgt->tgt_next) {
15581 15541 mutex_enter(&ptgt->tgt_mutex);
15582 15542 for (plun = ptgt->tgt_lun; plun != NULL;
15583 15543 plun = plun->lun_next) {
15584 15544 if (plun->lun_mpxio &&
15585 15545 plun->lun_state & FCP_LUN_BUSY) {
15586 15546 if (!fcp_pass_to_hp(pptr, plun,
15587 15547 plun->lun_cip,
15588 15548 FCP_MPXIO_PATH_SET_BUSY,
15589 15549 pptr->port_link_cnt,
15590 15550 ptgt->tgt_change_cnt, 0, 0)) {
15591 15551 FCP_TRACE(fcp_logq,
15592 15552 pptr->port_instbuf,
15593 15553 fcp_trace,
15594 15554 FCP_BUF_LEVEL_2, 0,
15595 15555 "path_verifybusy: "
15596 15556 "disable lun %p failed!",
15597 15557 plun);
15598 15558 }
15599 15559 }
15600 15560 }
15601 15561 mutex_exit(&ptgt->tgt_mutex);
15602 15562 }
15603 15563 }
15604 15564 }
15605 15565
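/*
 * Function:    fcp_update_mpxio_path
 *
 * Description: Enables (FCP_MPXIO_PATH_CLEAR_BUSY) or disables
 *		(FCP_MPXIO_PATH_SET_BUSY) the mdi path of an MPxIO managed
 *		LUN using the DRIVER_DISABLE_TRANSIENT flag.  Fails with
 *		NDI_FAILURE if the child node is no longer present.
 */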
15606 15566 static int
15607 15567 fcp_update_mpxio_path(struct fcp_lun *plun, child_info_t *cip, int what)
15608 15568 {
15609 15569 dev_info_t *cdip = NULL;
15610 15570 dev_info_t *pdip = NULL;
15611 15571
15612 15572 ASSERT(plun);
15613 15573
15614 15574 mutex_enter(&plun->lun_mutex);
15615 15575 if (fcp_is_child_present(plun, cip) == FC_FAILURE) {
15616 15576 mutex_exit(&plun->lun_mutex);
15617 15577 return (NDI_FAILURE);
15618 15578 }
15619 15579 mutex_exit(&plun->lun_mutex);
15620 15580 cdip = mdi_pi_get_client(PIP(cip));
15621 15581 pdip = mdi_pi_get_phci(PIP(cip));
15622 15582
15623 15583 ASSERT(cdip != NULL);
15624 15584 ASSERT(pdip != NULL);
15625 15585
15626 15586 if (what == FCP_MPXIO_PATH_CLEAR_BUSY) {
15627 15587 /* LUN ready for IO */
15628 15588 (void) mdi_pi_enable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15629 15589 } else {
15630 15590 /* LUN busy to accept IO */
15631 15591 (void) mdi_pi_disable_path(PIP(cip), DRIVER_DISABLE_TRANSIENT);
15632 15592 }
15633 15593 return (NDI_SUCCESS);
15634 15594 }
15635 15595
15636 15596 /*
15637 15597  * The caller must free the returned string, which is MAXPATHLEN bytes long.
15638 15598  * If the device is offline (-1 instance number), NULL
15639 15599 * will be returned.
15640 15600 */
15641 15601 static char *
15642 15602 fcp_get_lun_path(struct fcp_lun *plun)
15643 15603 {
15644 15604 dev_info_t *dip = NULL;
15645 15605 char *path = NULL;
15646 15606 mdi_pathinfo_t *pip = NULL;
15647 15607
15648 15608 if (plun == NULL) {
15649 15609 return (NULL);
15650 15610 }
15651 15611
15652 15612 mutex_enter(&plun->lun_mutex);
15653 15613 if (plun->lun_mpxio == 0) {
15654 15614 dip = DIP(plun->lun_cip);
15655 15615 mutex_exit(&plun->lun_mutex);
15656 15616 } else {
15657 15617 /*
15658 15618 * lun_cip must be accessed with lun_mutex held. Here
15659 15619 * plun->lun_cip either points to a valid node or it is NULL.
15660 15620 * Make a copy so that we can release lun_mutex.
15661 15621 */
15662 15622 pip = PIP(plun->lun_cip);
15663 15623
15664 15624 /*
15665 15625 * Increase ref count on the path so that we can release
15666 15626 * lun_mutex and still be sure that the pathinfo node (and thus
15667 15627 * also the client) is not deallocated. If pip is NULL, this
15668 15628 * has no effect.
15669 15629 */
15670 15630 mdi_hold_path(pip);
15671 15631
15672 15632 mutex_exit(&plun->lun_mutex);
15673 15633
15674 15634 /* Get the client. If pip is NULL, we get NULL. */
15675 15635 dip = mdi_pi_get_client(pip);
15676 15636 }
15677 15637
15678 15638 if (dip == NULL)
15679 15639 goto out;
15680 15640 if (ddi_get_instance(dip) < 0)
15681 15641 goto out;
15682 15642
15683 15643 path = kmem_alloc(MAXPATHLEN, KM_SLEEP);
15684 15644 if (path == NULL)
15685 15645 goto out;
15686 15646
15687 15647 (void) ddi_pathname(dip, path);
15688 15648
15689 15649 /* Clean up. */
15690 15650 out:
15691 15651 if (pip != NULL)
15692 15652 mdi_rele_path(pip);
15693 15653
15694 15654 /*
15695 15655 * In reality, the user wants a fully valid path (one they can open)
15696 15656 * but this string is lacking the mount point, and the minor node.
15697 15657 * It would be nice if we could "figure these out" somehow
15698 15658 * and fill them in. Otherwise, the userland code has to understand
15699 15659 * driver specific details of which minor node is the "best" or
15700 15660 * "right" one to expose. (Ex: which slice is the whole disk, or
15701 15661 * which tape doesn't rewind)
15702 15662 */
15703 15663 return (path);
15704 15664 }
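A minimal caller sketch of the ownership contract documented above fcp_get_lun_path(): the returned buffer is always MAXPATHLEN bytes and must be freed by the caller, and NULL is returned for offline devices. The helper fcp_log_lun_path() below is hypothetical and not part of this change; it only illustrates the free-after-use pattern.

static void
fcp_log_lun_path(struct fcp_lun *plun)
{
	char	*path;

	/* NULL means the LUN is offline or has no devinfo/pathinfo node. */
	path = fcp_get_lun_path(plun);
	if (path == NULL) {
		return;
	}

	cmn_err(CE_CONT, "?fcp: lun path: %s\n", path);

	/* The buffer is MAXPATHLEN bytes long regardless of strlen(path). */
	kmem_free(path, MAXPATHLEN);
}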
15705 15665
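/*
 * Function:    fcp_scsi_bus_config
 *
 * Description: bus_config(9E) entry point.  BUS_CONFIG_ONE requests are
 *		retried up to fcp_max_bus_config_retries times (waiting out
 *		the remainder of FCP_INIT_WAIT_TIMEOUT) so that fabric devices
 *		needed for root are available, then the parent's taskq is
 *		drained and one final attempt is made.  BUS_CONFIG_DRIVER and
 *		BUS_CONFIG_ALL wait until all devices have reported in
 *		(port_tmp_cnt == 0) or the timeout expires before configuring.
 */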
15706 15666 static int
15707 15667 fcp_scsi_bus_config(dev_info_t *parent, uint_t flag,
15708 15668 ddi_bus_config_op_t op, void *arg, dev_info_t **childp)
15709 15669 {
15710 15670 int64_t reset_delay;
15711 15671 int rval, retry = 0;
15712 15672 struct fcp_port *pptr = fcp_dip2port(parent);
15713 15673
15714 15674 reset_delay = (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15715 15675 (ddi_get_lbolt64() - pptr->port_attach_time);
15716 15676 if (reset_delay < 0) {
15717 15677 reset_delay = 0;
15718 15678 }
15719 15679
15720 15680 if (fcp_bus_config_debug) {
15721 15681 flag |= NDI_DEVI_DEBUG;
15722 15682 }
15723 15683
15724 15684 switch (op) {
15725 15685 case BUS_CONFIG_ONE:
15726 15686 /*
15727 15687 * Retry the command since we need to ensure
15728 15688 * the fabric devices are available for root
15729 15689 */
15730 15690 while (retry++ < fcp_max_bus_config_retries) {
15731 15691 rval = (ndi_busop_bus_config(parent,
15732 15692 flag | NDI_MDI_FALLBACK, op,
15733 15693 arg, childp, (clock_t)reset_delay));
15734 15694 if (rval == 0) {
15735 15695 return (rval);
15736 15696 }
15737 15697 }
15738 15698
15739 15699 /*
15740 15700 * drain taskq to make sure nodes are created and then
15741 15701 * try again.
15742 15702 */
15743 15703 taskq_wait(DEVI(parent)->devi_taskq);
15744 15704 return (ndi_busop_bus_config(parent, flag | NDI_MDI_FALLBACK,
15745 15705 op, arg, childp, 0));
15746 15706
15747 15707 case BUS_CONFIG_DRIVER:
15748 15708 case BUS_CONFIG_ALL: {
15749 15709 /*
15750 15710 * delay till all devices report in (port_tmp_cnt == 0)
15751 15711 * or FCP_INIT_WAIT_TIMEOUT
15752 15712 */
15753 15713 mutex_enter(&pptr->port_mutex);
15754 15714 while ((reset_delay > 0) && pptr->port_tmp_cnt) {
15755 15715 (void) cv_timedwait(&pptr->port_config_cv,
15756 15716 &pptr->port_mutex,
15757 15717 ddi_get_lbolt() + (clock_t)reset_delay);
15758 15718 reset_delay =
15759 15719 (int64_t)(USEC_TO_TICK(FCP_INIT_WAIT_TIMEOUT)) -
15760 15720 (ddi_get_lbolt64() - pptr->port_attach_time);
15761 15721 }
15762 15722 mutex_exit(&pptr->port_mutex);
15763 15723 /* drain taskq to make sure nodes are created */
15764 15724 taskq_wait(DEVI(parent)->devi_taskq);
15765 15725 return (ndi_busop_bus_config(parent, flag, op,
15766 15726 arg, childp, 0));
15767 15727 }
15768 15728
15769 15729 default:
15770 15730 return (NDI_FAILURE);
15771 15731 }
15772 15732 /*NOTREACHED*/
15773 15733 }
15774 15734
15775 15735 static int
15776 15736 fcp_scsi_bus_unconfig(dev_info_t *parent, uint_t flag,
15777 15737 ddi_bus_config_op_t op, void *arg)
15778 15738 {
15779 15739 if (fcp_bus_config_debug) {
15780 15740 flag |= NDI_DEVI_DEBUG;
15781 15741 }
15782 15742
15783 15743 return (ndi_busop_bus_unconfig(parent, flag, op, arg));
15784 15744 }
15785 15745
15786 15746
15787 15747 /*
15788 15748 * Routine to copy GUID into the lun structure.
15789 15749  * Returns 0 if the copy was successful and 1 if a failure was
15790 15750  * encountered and the guid was not copied.
15791 15751 */
15792 15752 static int
15793 15753 fcp_copy_guid_2_lun_block(struct fcp_lun *plun, char *guidp)
15794 15754 {
15795 15755
15796 15756 	int retval = 0;
15797 15757 	unsigned int len;
15798 15758 
15799 15759 	if ((guidp == NULL) || (plun == NULL)) {
15800 15760 		return (1);
15801 15761 	}
15802 15762 
15803 15763 	/* add one for the null terminator */
15804 15764 	len = strlen(guidp) + 1;
15805 15765 /*
15806 15766 	 * If plun->lun_guid has already been allocated,
15807 15767 	 * then check the size. If the size is exact, reuse
15808 15768 	 * it; if not, free it and allocate the required size.
15809 15769 	 * The reallocation should NOT typically happen
15810 15770 	 * unless the reported GUID changes between passes.
15811 15771 	 * We free up and allocate again even if the
15812 15772 	 * size was more than required. This is due to the
15813 15773 	 * fact that the field lun_guid_size serves the
15814 15774 	 * dual role of indicating the size of the GUID
15815 15775 	 * and ALSO the allocation size.
15816 15776 */
15817 15777 if (plun->lun_guid) {
15818 15778 if (plun->lun_guid_size != len) {
15819 15779 /*
15820 15780 * free the allocated memory and
15821 15781 * initialize the field
15822 15782 * lun_guid_size to 0.
15823 15783 */
15824 15784 kmem_free(plun->lun_guid, plun->lun_guid_size);
15825 15785 plun->lun_guid = NULL;
15826 15786 plun->lun_guid_size = 0;
15827 15787 }
15828 15788 }
15829 15789 /*
15830 15790 * alloc only if not already done.
15831 15791 */
15832 15792 if (plun->lun_guid == NULL) {
15833 15793 plun->lun_guid = kmem_zalloc(len, KM_NOSLEEP);
15834 15794 if (plun->lun_guid == NULL) {
15835 15795 			cmn_err(CE_WARN, "fcp_copy_guid_2_lun_block: "
15836 15796 			    "Unable to allocate "
15837 15797 			    "memory for GUID!!! size %d", len);
15838 15798 retval = 1;
15839 15799 } else {
15840 15800 plun->lun_guid_size = len;
15841 15801 }
15842 15802 }
15843 15803 if (plun->lun_guid) {
15844 15804 /*
15845 15805 * now copy the GUID
15846 15806 */
15847 15807 bcopy(guidp, plun->lun_guid, plun->lun_guid_size);
15848 15808 }
15849 15809 return (retval);
15850 15810 }
15851 15811
15852 15812 /*
15853 15813 * fcp_reconfig_wait
15854 15814 *
15855 15815 * Wait for a rediscovery/reconfiguration to complete before continuing.
15856 15816 */
15857 15817
15858 15818 static void
15859 15819 fcp_reconfig_wait(struct fcp_port *pptr)
15860 15820 {
15861 15821 clock_t reconfig_start, wait_timeout;
15862 15822
15863 15823 /*
15864 15824 * Quick check. If pptr->port_tmp_cnt is 0, there is no
15865 15825 * reconfiguration in progress.
15866 15826 */
15867 15827
15868 15828 mutex_enter(&pptr->port_mutex);
15869 15829 if (pptr->port_tmp_cnt == 0) {
15870 15830 mutex_exit(&pptr->port_mutex);
15871 15831 return;
15872 15832 }
15873 15833 mutex_exit(&pptr->port_mutex);
15874 15834
15875 15835 /*
15876 15836 * If we cause a reconfig by raising power, delay until all devices
15877 15837 * report in (port_tmp_cnt returns to 0)
15878 15838 */
15879 15839
15880 15840 reconfig_start = ddi_get_lbolt();
15881 15841 wait_timeout = drv_usectohz(FCP_INIT_WAIT_TIMEOUT);
15882 15842
15883 15843 mutex_enter(&pptr->port_mutex);
15884 15844
15885 15845 while (((ddi_get_lbolt() - reconfig_start) < wait_timeout) &&
15886 15846 pptr->port_tmp_cnt) {
15887 15847
15888 15848 (void) cv_timedwait(&pptr->port_config_cv, &pptr->port_mutex,
15889 15849 reconfig_start + wait_timeout);
15890 15850 }
15891 15851
15892 15852 mutex_exit(&pptr->port_mutex);
15893 15853
15894 15854 /*
15895 15855 	 * Even if port_tmp_cnt isn't 0, continue without error. The port
15896 15856 	 * we want may still be ok. If not, it will error out later.
15897 15857 */
15898 15858 }
15899 15859
15900 15860 /*
15901 15861 * Read masking info from fp.conf and construct the global fcp_lun_blacklist.
15902 15862 * We rely on the fcp_global_mutex to provide protection against changes to
15903 15863 * the fcp_lun_blacklist.
15904 15864 *
15905 15865 * You can describe a list of target port WWNs and LUN numbers which will
15906 15866 * not be configured. LUN numbers will be interpreted as decimal. White
15907 15867 * spaces and ',' can be used in the list of LUN numbers.
15908 15868 *
15909 15869 * To prevent LUNs 1 and 2 from being configured for target
15910 15870 * port 510000f010fd92a1 and target port 510000e012079df1, set:
15911 15871 *
15912 15872 * pwwn-lun-blacklist=
15913 15873 * "510000f010fd92a1,1,2",
15914 15874 * "510000e012079df1,1,2";
15915 15875 */
15916 15876 static void
15917 15877 fcp_read_blacklist(dev_info_t *dip,
15918 15878 struct fcp_black_list_entry **pplun_blacklist)
15919 15879 {
15920 15880 char **prop_array = NULL;
15921 15881 char *curr_pwwn = NULL;
15922 15882 char *curr_lun = NULL;
15923 15883 uint32_t prop_item = 0;
15924 15884 int idx = 0;
15925 15885 int len = 0;
15926 15886
15927 15887 ASSERT(mutex_owned(&fcp_global_mutex));
15928 15888 if (ddi_prop_lookup_string_array(DDI_DEV_T_ANY, dip,
15929 15889 DDI_PROP_DONTPASS | DDI_PROP_NOTPROM,
15930 15890 LUN_BLACKLIST_PROP, &prop_array, &prop_item) != DDI_PROP_SUCCESS) {
15931 15891 return;
15932 15892 }
15933 15893
15934 15894 for (idx = 0; idx < prop_item; idx++) {
15935 15895
15936 15896 curr_pwwn = prop_array[idx];
15937 15897 while (*curr_pwwn == ' ') {
15938 15898 curr_pwwn++;
15939 15899 }
15940 15900 if (strlen(curr_pwwn) <= (sizeof (la_wwn_t) * 2 + 1)) {
15941 15901 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15942 15902 ", please check.", curr_pwwn);
15943 15903 continue;
15944 15904 }
15945 15905 if ((*(curr_pwwn + sizeof (la_wwn_t) * 2) != ' ') &&
15946 15906 (*(curr_pwwn + sizeof (la_wwn_t) * 2) != ',')) {
15947 15907 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the blacklist"
15948 15908 ", please check.", curr_pwwn);
15949 15909 continue;
15950 15910 }
15951 15911 for (len = 0; len < sizeof (la_wwn_t) * 2; len++) {
15952 15912 if (isxdigit(curr_pwwn[len]) != TRUE) {
15953 15913 fcp_log(CE_WARN, NULL, "Invalid WWN %s in the "
15954 15914 "blacklist, please check.", curr_pwwn);
15955 15915 break;
15956 15916 }
15957 15917 }
15958 15918 if (len != sizeof (la_wwn_t) * 2) {
15959 15919 continue;
15960 15920 }
15961 15921
15962 15922 curr_lun = curr_pwwn + sizeof (la_wwn_t) * 2 + 1;
15963 15923 *(curr_lun - 1) = '\0';
15964 15924 fcp_mask_pwwn_lun(curr_pwwn, curr_lun, pplun_blacklist);
15965 15925 }
15966 15926
15967 15927 ddi_prop_free(prop_array);
15968 15928 }
15969 15929
15970 15930 /*
15971 15931 * Get the masking info about one remote target port designated by wwn.
15972 15932 * Lun ids could be separated by ',' or white spaces.
15973 15933 */
15974 15934 static void
15975 15935 fcp_mask_pwwn_lun(char *curr_pwwn, char *curr_lun,
15976 15936 struct fcp_black_list_entry **pplun_blacklist)
15977 15937 {
15978 15938 int idx = 0;
15979 15939 uint32_t offset = 0;
15980 15940 unsigned long lun_id = 0;
15981 15941 char lunid_buf[16];
15982 15942 char *pend = NULL;
15983 15943 int illegal_digit = 0;
15984 15944
15985 15945 while (offset < strlen(curr_lun)) {
15986 15946 while ((curr_lun[offset + idx] != ',') &&
15987 15947 (curr_lun[offset + idx] != '\0') &&
15988 15948 (curr_lun[offset + idx] != ' ')) {
15989 15949 if (isdigit(curr_lun[offset + idx]) == 0) {
15990 15950 illegal_digit++;
15991 15951 }
15992 15952 idx++;
15993 15953 }
15994 15954 if (illegal_digit > 0) {
15995 15955 offset += (idx+1); /* To the start of next lun */
15996 15956 idx = 0;
15997 15957 illegal_digit = 0;
15998 15958 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
15999 15959 "the blacklist, please check digits.",
16000 15960 curr_lun, curr_pwwn);
16001 15961 continue;
16002 15962 }
16003 15963 if (idx >= (sizeof (lunid_buf) / sizeof (lunid_buf[0]))) {
16004 15964 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16005 15965 "the blacklist, please check the length of LUN#.",
16006 15966 curr_lun, curr_pwwn);
16007 15967 break;
16008 15968 }
16009 15969 if (idx == 0) { /* ignore ' ' or ',' or '\0' */
16010 15970 offset++;
16011 15971 continue;
16012 15972 }
16013 15973
16014 15974 bcopy(curr_lun + offset, lunid_buf, idx);
16015 15975 lunid_buf[idx] = '\0';
16016 15976 if (ddi_strtoul(lunid_buf, &pend, 10, &lun_id) == 0) {
16017 15977 fcp_add_one_mask(curr_pwwn, lun_id, pplun_blacklist);
16018 15978 } else {
16019 15979 fcp_log(CE_WARN, NULL, "Invalid LUN %s for WWN %s in "
16020 15980 "the blacklist, please check %s.",
16021 15981 curr_lun, curr_pwwn, lunid_buf);
16022 15982 }
16023 15983 offset += (idx+1); /* To the start of next lun */
16024 15984 idx = 0;
16025 15985 }
16026 15986 }
16027 15987
16028 15988 /*
16029 15989 * Add one masking record
16030 15990 */
16031 15991 static void
16032 15992 fcp_add_one_mask(char *curr_pwwn, uint32_t lun_id,
16033 15993 struct fcp_black_list_entry **pplun_blacklist)
16034 15994 {
16035 15995 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16036 15996 struct fcp_black_list_entry *new_entry = NULL;
16037 15997 la_wwn_t wwn;
16038 15998
16039 15999 fcp_ascii_to_wwn(curr_pwwn, wwn.raw_wwn, sizeof (la_wwn_t));
16040 16000 while (tmp_entry) {
16041 16001 if ((bcmp(&tmp_entry->wwn, &wwn,
16042 16002 sizeof (la_wwn_t)) == 0) && (tmp_entry->lun == lun_id)) {
16043 16003 return;
16044 16004 }
16045 16005
16046 16006 tmp_entry = tmp_entry->next;
16047 16007 }
16048 16008
16049 16009 /* add to black list */
16050 16010 new_entry = (struct fcp_black_list_entry *)kmem_zalloc
16051 16011 (sizeof (struct fcp_black_list_entry), KM_SLEEP);
16052 16012 bcopy(&wwn, &new_entry->wwn, sizeof (la_wwn_t));
16053 16013 new_entry->lun = lun_id;
16054 16014 new_entry->masked = 0;
16055 16015 new_entry->next = *pplun_blacklist;
16056 16016 *pplun_blacklist = new_entry;
16057 16017 }
16058 16018
16059 16019 /*
16060 16020 * Check if we should mask the specified lun of this fcp_tgt
16061 16021 */
16062 16022 static int
16063 16023 fcp_should_mask(la_wwn_t *wwn, uint32_t lun_id)
16064 16024 {
16065 16025 struct fcp_black_list_entry *remote_port;
16066 16026
16067 16027 remote_port = fcp_lun_blacklist;
16068 16028 while (remote_port != NULL) {
16069 16029 if (bcmp(wwn, &remote_port->wwn, sizeof (la_wwn_t)) == 0) {
16070 16030 if (remote_port->lun == lun_id) {
16071 16031 remote_port->masked++;
16072 16032 if (remote_port->masked == 1) {
16073 16033 fcp_log(CE_NOTE, NULL, "LUN %d of port "
16074 16034 "%02x%02x%02x%02x%02x%02x%02x%02x "
16075 16035 "is masked due to black listing.\n",
16076 16036 lun_id, wwn->raw_wwn[0],
16077 16037 wwn->raw_wwn[1], wwn->raw_wwn[2],
16078 16038 wwn->raw_wwn[3], wwn->raw_wwn[4],
16079 16039 wwn->raw_wwn[5], wwn->raw_wwn[6],
16080 16040 wwn->raw_wwn[7]);
16081 16041 }
16082 16042 return (TRUE);
16083 16043 }
16084 16044 }
16085 16045 remote_port = remote_port->next;
16086 16046 }
16087 16047 return (FALSE);
16088 16048 }
16089 16049
16090 16050 /*
16091 16051 * Release all allocated resources
16092 16052 */
16093 16053 static void
16094 16054 fcp_cleanup_blacklist(struct fcp_black_list_entry **pplun_blacklist)
16095 16055 {
16096 16056 struct fcp_black_list_entry *tmp_entry = *pplun_blacklist;
16097 16057 struct fcp_black_list_entry *current_entry = NULL;
16098 16058
16099 16059 ASSERT(mutex_owned(&fcp_global_mutex));
16100 16060 /*
16101 16061 * Traverse all luns
16102 16062 */
16103 16063 while (tmp_entry) {
16104 16064 current_entry = tmp_entry;
16105 16065 tmp_entry = tmp_entry->next;
16106 16066 kmem_free(current_entry, sizeof (struct fcp_black_list_entry));
16107 16067 }
16108 16068 *pplun_blacklist = NULL;
16109 16069 }
16110 16070
16111 16071 /*
16112 16072  * Naming conventions in the fcp module:
16113 16073 * pkt@scsi_pkt, cmd@fcp_pkt, icmd@fcp_ipkt, fpkt@fc_packet, pptr@fcp_port
16114 16074 */
16115 16075 static struct scsi_pkt *
16116 16076 fcp_pseudo_init_pkt(struct scsi_address *ap, struct scsi_pkt *pkt,
16117 16077 struct buf *bp, int cmdlen, int statuslen, int tgtlen,
16118 16078 int flags, int (*callback)(), caddr_t arg)
16119 16079 {
16120 16080 fcp_port_t *pptr = ADDR2FCP(ap);
16121 16081 fcp_pkt_t *cmd = NULL;
16122 16082 fc_frame_hdr_t *hp;
16123 16083
16124 16084 /*
16125 16085 * First step: get the packet
16126 16086 */
16127 16087 if (pkt == NULL) {
16128 16088 pkt = scsi_hba_pkt_alloc(pptr->port_dip, ap, cmdlen, statuslen,
16129 16089 tgtlen, sizeof (fcp_pkt_t) + pptr->port_priv_pkt_len,
16130 16090 callback, arg);
16131 16091 if (pkt == NULL) {
16132 16092 return (NULL);
16133 16093 }
16134 16094
16135 16095 /*
16136 16096 * All fields in scsi_pkt will be initialized properly or
16137 16097 		 * set to zero. Nothing more needs to be done for scsi_pkt.
16138 16098 */
16139 16099 /*
16140 16100 * But it's our responsibility to link other related data
16141 16101 		 * structures. Their initialization will be done just
16142 16102 		 * before the scsi_pkt is sent to the FCA.
16143 16103 */
16144 16104 cmd = PKT2CMD(pkt);
16145 16105 cmd->cmd_pkt = pkt;
16146 16106 cmd->cmd_fp_pkt = &cmd->cmd_fc_packet;
16147 16107 /*
16148 16108 * fc_packet_t
16149 16109 */
16150 16110 cmd->cmd_fp_pkt->pkt_ulp_private = (opaque_t)cmd;
16151 16111 cmd->cmd_fp_pkt->pkt_fca_private = (opaque_t)((caddr_t)cmd +
16152 16112 sizeof (struct fcp_pkt));
16153 16113 cmd->cmd_fp_pkt->pkt_cmd = (caddr_t)&cmd->cmd_fcp_cmd;
16154 16114 cmd->cmd_fp_pkt->pkt_cmdlen = sizeof (struct fcp_cmd);
16155 16115 cmd->cmd_fp_pkt->pkt_resp = cmd->cmd_fcp_rsp;
16156 16116 cmd->cmd_fp_pkt->pkt_rsplen = FCP_MAX_RSP_IU_SIZE;
16157 16117 /*
16158 16118 		 * Fill in the Fibre Channel header
16159 16119 */
16160 16120 hp = &cmd->cmd_fp_pkt->pkt_cmd_fhdr;
16161 16121 hp->r_ctl = R_CTL_COMMAND;
16162 16122 hp->rsvd = 0;
16163 16123 hp->type = FC_TYPE_SCSI_FCP;
16164 16124 hp->f_ctl = F_CTL_SEQ_INITIATIVE | F_CTL_FIRST_SEQ;
16165 16125 hp->seq_id = 0;
16166 16126 hp->df_ctl = 0;
16167 16127 hp->seq_cnt = 0;
16168 16128 hp->ox_id = 0xffff;
16169 16129 hp->rx_id = 0xffff;
16170 16130 hp->ro = 0;
16171 16131 } else {
16172 16132 /*
16173 16133 		 * We need to think about whether any elements in
16174 16134 		 * the related data structures should be reset.
16175 16135 */
16176 16136 FCP_TRACE(fcp_logq, pptr->port_instbuf,
16177 16137 fcp_trace, FCP_BUF_LEVEL_6, 0,
16178 16138 "reusing pkt, flags %d", flags);
16179 16139 cmd = PKT2CMD(pkt);
16180 16140 if (cmd->cmd_fp_pkt->pkt_pd) {
16181 16141 cmd->cmd_fp_pkt->pkt_pd = NULL;
16182 16142 }
16183 16143 }
16184 16144
16185 16145 /*
16186 16146 * Second step: dma allocation/move
16187 16147 */
16188 16148 if (bp && bp->b_bcount != 0) {
16189 16149 /*
16190 16150 * Mark if it's read or write
16191 16151 */
16192 16152 if (bp->b_flags & B_READ) {
16193 16153 cmd->cmd_flags |= CFLAG_IS_READ;
16194 16154 } else {
16195 16155 cmd->cmd_flags &= ~CFLAG_IS_READ;
16196 16156 }
16197 16157
16198 16158 bp_mapin(bp);
16199 16159 cmd->cmd_fp_pkt->pkt_data = bp->b_un.b_addr;
16200 16160 cmd->cmd_fp_pkt->pkt_datalen = bp->b_bcount;
16201 16161 cmd->cmd_fp_pkt->pkt_data_resid = 0;
16202 16162 } else {
16203 16163 /*
16204 16164 * It seldom happens, except when CLUSTER or SCSI_VHCI wants
16205 16165 * to send zero-length read/write.
16206 16166 */
16207 16167 cmd->cmd_fp_pkt->pkt_data = NULL;
16208 16168 cmd->cmd_fp_pkt->pkt_datalen = 0;
16209 16169 }
16210 16170
16211 16171 return (pkt);
16212 16172 }
16213 16173
16214 16174 static void
16215 16175 fcp_pseudo_destroy_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16216 16176 {
16217 16177 fcp_port_t *pptr = ADDR2FCP(ap);
16218 16178
16219 16179 /*
16220 16180 	 * First we let the FCA uninitialize its private part.
16221 16181 */
16222 16182 (void) fc_ulp_uninit_packet(pptr->port_fp_handle,
16223 16183 PKT2CMD(pkt)->cmd_fp_pkt);
16224 16184
16225 16185 /*
16226 16186 * Then we uninitialize fc_packet.
16227 16187 */
16228 16188
16229 16189 /*
16230 16190 	 * Thirdly, we uninitialize fcp_pkt.
16231 16191 */
16232 16192
16233 16193 /*
16234 16194 * In the end, we free scsi_pkt.
16235 16195 */
16236 16196 scsi_hba_pkt_free(ap, pkt);
16237 16197 }
16238 16198
16239 16199 static int
16240 16200 fcp_pseudo_start(struct scsi_address *ap, struct scsi_pkt *pkt)
16241 16201 {
16242 16202 fcp_port_t *pptr = ADDR2FCP(ap);
16243 16203 fcp_lun_t *plun = ADDR2LUN(ap);
16244 16204 fcp_tgt_t *ptgt = plun->lun_tgt;
16245 16205 fcp_pkt_t *cmd = PKT2CMD(pkt);
16246 16206 fcp_cmd_t *fcmd = &cmd->cmd_fcp_cmd;
16247 16207 fc_packet_t *fpkt = cmd->cmd_fp_pkt;
16248 16208 int rval;
16249 16209
16250 16210 fpkt->pkt_pd = ptgt->tgt_pd_handle;
16251 16211 (void) fc_ulp_init_packet(pptr->port_fp_handle, cmd->cmd_fp_pkt, 1);
16252 16212
16253 16213 /*
16254 16214 	 * Firstly, we need to initialize fcp_pkt_t.
16255 16215 	 * Secondly, we need to initialize fcp_cmd_t.
16256 16216 */
16257 16217 bcopy(pkt->pkt_cdbp, fcmd->fcp_cdb, pkt->pkt_cdblen);
16258 16218 fcmd->fcp_data_len = fpkt->pkt_datalen;
16259 16219 fcmd->fcp_ent_addr = plun->lun_addr;
16260 16220 if (pkt->pkt_flags & FLAG_HTAG) {
16261 16221 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_HEAD_OF_Q;
16262 16222 } else if (pkt->pkt_flags & FLAG_OTAG) {
16263 16223 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_ORDERED;
16264 16224 } else if (pkt->pkt_flags & FLAG_STAG) {
16265 16225 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_SIMPLE;
16266 16226 } else {
16267 16227 fcmd->fcp_cntl.cntl_qtype = FCP_QTYPE_UNTAGGED;
16268 16228 }
16269 16229
16270 16230 if (cmd->cmd_flags & CFLAG_IS_READ) {
16271 16231 fcmd->fcp_cntl.cntl_read_data = 1;
16272 16232 fcmd->fcp_cntl.cntl_write_data = 0;
16273 16233 } else {
16274 16234 fcmd->fcp_cntl.cntl_read_data = 0;
16275 16235 fcmd->fcp_cntl.cntl_write_data = 1;
16276 16236 }
16277 16237
16278 16238 /*
16279 16239 	 * Then we need to initialize fc_packet_t too.
16280 16240 */
16281 16241 fpkt->pkt_timeout = pkt->pkt_time + 2;
16282 16242 fpkt->pkt_cmd_fhdr.d_id = ptgt->tgt_d_id;
16283 16243 fpkt->pkt_cmd_fhdr.s_id = pptr->port_id;
16284 16244 if (cmd->cmd_flags & CFLAG_IS_READ) {
16285 16245 fpkt->pkt_tran_type = FC_PKT_FCP_READ;
16286 16246 } else {
16287 16247 fpkt->pkt_tran_type = FC_PKT_FCP_WRITE;
16288 16248 }
16289 16249
16290 16250 if (pkt->pkt_flags & FLAG_NOINTR) {
16291 16251 fpkt->pkt_comp = NULL;
16292 16252 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_NO_INTR);
16293 16253 } else {
16294 16254 fpkt->pkt_comp = fcp_cmd_callback;
16295 16255 fpkt->pkt_tran_flags = (FC_TRAN_CLASS3 | FC_TRAN_INTR);
16296 16256 if (pkt->pkt_flags & FLAG_IMMEDIATE_CB) {
16297 16257 fpkt->pkt_tran_flags |= FC_TRAN_IMMEDIATE_CB;
16298 16258 }
16299 16259 }
16300 16260
16301 16261 /*
16302 16262 	 * Lastly, we need to initialize scsi_pkt.
16303 16263 */
16304 16264 pkt->pkt_reason = CMD_CMPLT;
16305 16265 pkt->pkt_state = 0;
16306 16266 pkt->pkt_statistics = 0;
16307 16267 pkt->pkt_resid = 0;
16308 16268
16309 16269 /*
16310 16270 * if interrupts aren't allowed (e.g. at dump time) then we'll
16311 16271 * have to do polled I/O
16312 16272 */
16313 16273 if (pkt->pkt_flags & FLAG_NOINTR) {
16314 16274 return (fcp_dopoll(pptr, cmd));
16315 16275 }
16316 16276
16317 16277 cmd->cmd_state = FCP_PKT_ISSUED;
16318 16278 rval = fcp_transport(pptr->port_fp_handle, fpkt, 0);
16319 16279 if (rval == FC_SUCCESS) {
16320 16280 return (TRAN_ACCEPT);
16321 16281 }
16322 16282
16323 16283 /*
16324 16284 * Need more consideration
16325 16285 *
16326 16286 * pkt->pkt_flags & FLAG_NOQUEUE could abort other pkt
16327 16287 */
16328 16288 cmd->cmd_state = FCP_PKT_IDLE;
16329 16289 if (rval == FC_TRAN_BUSY) {
16330 16290 return (TRAN_BUSY);
16331 16291 } else {
16332 16292 return (TRAN_FATAL_ERROR);
16333 16293 }
16334 16294 }
16335 16295
16336 16296 /*
16337 16297 * scsi_poll will always call tran_sync_pkt for pseudo FC-HBAs
16338 16298 * SCSA will initialize it to scsi_sync_cache_pkt for physical FC-HBAs
16339 16299 */
16340 16300 static void
16341 16301 fcp_pseudo_sync_pkt(struct scsi_address *ap, struct scsi_pkt *pkt)
16342 16302 {
16343 16303 FCP_TRACE(fcp_logq, "fcp_pseudo_sync_pkt", fcp_trace,
16344 16304 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16345 16305 }
16346 16306
16347 16307 /*
16348 16308 * scsi_dmafree will always call tran_dmafree, when STATE_ARQ_DONE
16349 16309 */
16350 16310 static void
16351 16311 fcp_pseudo_dmafree(struct scsi_address *ap, struct scsi_pkt *pkt)
16352 16312 {
16353 16313 FCP_TRACE(fcp_logq, "fcp_pseudo_dmafree", fcp_trace,
16354 16314 FCP_BUF_LEVEL_2, 0, "ap-%p, scsi_pkt-%p", ap, pkt);
16355 16315 }
|
↓ open down ↓ |
2894 lines elided |
↑ open up ↑ |