Print this page
NEX-20098 idm_refcnt_unref_task() fails to hold mutex before calling REFCNT_AUDIT
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
NEX-9981 Deadman timer panic from idm_refcnt_wait_ref thread while offlining iSCSI targets
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Sanjay Nadkarni <sanjay.nadkarni@nexenta.com>
Reviewed by: Rob Gittins <rob.gittins@nexenta.com>
NEX-6018 Return of the walking dead idm_refcnt_wait_ref comstar threads
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/idm/idm.c
+++ new/usr/src/uts/common/io/idm/idm.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
|
↓ open down ↓ |
12 lines elided |
↑ open up ↑ |
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 + * Copyright 2019 Nexenta Systems, Inc. All rights reserved.
23 24 */
24 25
25 26 #include <sys/cpuvar.h>
26 27 #include <sys/conf.h>
27 28 #include <sys/file.h>
28 29 #include <sys/ddi.h>
29 30 #include <sys/sunddi.h>
30 31 #include <sys/modctl.h>
31 32
32 33 #include <sys/socket.h>
33 34 #include <sys/strsubr.h>
34 35 #include <sys/sysmacros.h>
35 36
36 37 #include <sys/socketvar.h>
37 38 #include <netinet/in.h>
38 39
39 40 #include <sys/idm/idm.h>
40 41 #include <sys/idm/idm_so.h>
41 42
42 43 #define IDM_NAME_VERSION "iSCSI Data Mover"
43 44
44 45 extern struct mod_ops mod_miscops;
45 46 extern struct mod_ops mod_miscops;
46 47
47 48 static struct modlmisc modlmisc = {
48 49 &mod_miscops, /* Type of module */
49 50 IDM_NAME_VERSION
50 51 };
51 52
52 53 static struct modlinkage modlinkage = {
53 54 MODREV_1, (void *)&modlmisc, NULL
|
↓ open down ↓ |
21 lines elided |
↑ open up ↑ |
54 55 };
55 56
56 57 extern void idm_wd_thread(void *arg);
57 58
58 59 static int _idm_init(void);
59 60 static int _idm_fini(void);
60 61 static void idm_buf_bind_in_locked(idm_task_t *idt, idm_buf_t *buf);
61 62 static void idm_buf_bind_out_locked(idm_task_t *idt, idm_buf_t *buf);
62 63 static void idm_buf_unbind_in_locked(idm_task_t *idt, idm_buf_t *buf);
63 64 static void idm_buf_unbind_out_locked(idm_task_t *idt, idm_buf_t *buf);
64 -static void idm_task_abort_one(idm_conn_t *ic, idm_task_t *idt,
65 +static stmf_status_t idm_task_abort_one(idm_conn_t *ic, idm_task_t *idt,
65 66 idm_abort_type_t abort_type);
66 67 static void idm_task_aborted(idm_task_t *idt, idm_status_t status);
67 68 static idm_pdu_t *idm_pdu_alloc_common(uint_t hdrlen, uint_t datalen,
68 69 int sleepflag);
69 70
70 71 boolean_t idm_conn_logging = 0;
71 72 boolean_t idm_svc_logging = 0;
72 73 #ifdef DEBUG
73 74 boolean_t idm_pattern_checking = 1;
74 75 #else
75 76 boolean_t idm_pattern_checking = 0;
76 77 #endif
77 78
78 79 /*
79 80 * Potential tuneable for the maximum number of tasks. Default to
80 81 * IDM_TASKIDS_MAX
81 82 */
82 83
83 84 uint32_t idm_max_taskids = IDM_TASKIDS_MAX;
84 85
85 86 /*
86 87 * Global list of transport handles
87 88 * These are listed in preferential order, so we can simply take the
88 89 * first "it_conn_is_capable" hit. Note also that the order maps to
89 90 * the order of the idm_transport_type_t list.
90 91 */
91 92 idm_transport_t idm_transport_list[] = {
92 93
93 94 /* iSER on InfiniBand transport handle */
94 95 {IDM_TRANSPORT_TYPE_ISER, /* type */
95 96 "/devices/ib/iser@0:iser", /* device path */
96 97 NULL, /* LDI handle */
97 98 NULL, /* transport ops */
98 99 NULL}, /* transport caps */
99 100
100 101 /* IDM native sockets transport handle */
101 102 {IDM_TRANSPORT_TYPE_SOCKETS, /* type */
102 103 NULL, /* device path */
103 104 NULL, /* LDI handle */
104 105 NULL, /* transport ops */
105 106 NULL} /* transport caps */
106 107
107 108 };
108 109
109 110 int
110 111 _init(void)
111 112 {
112 113 int rc;
113 114
114 115 if ((rc = _idm_init()) != 0) {
115 116 return (rc);
116 117 }
117 118
118 119 return (mod_install(&modlinkage));
119 120 }
120 121
121 122 int
122 123 _fini(void)
123 124 {
124 125 int rc;
125 126
126 127 if ((rc = _idm_fini()) != 0) {
127 128 return (rc);
128 129 }
129 130
130 131 if ((rc = mod_remove(&modlinkage)) != 0) {
131 132 return (rc);
132 133 }
133 134
134 135 return (rc);
135 136 }
136 137
137 138 int
138 139 _info(struct modinfo *modinfop)
139 140 {
140 141 return (mod_info(&modlinkage, modinfop));
141 142 }
142 143
143 144 /*
144 145 * idm_transport_register()
145 146 *
146 147 * Provides a mechanism for an IDM transport driver to register its
147 148 * transport ops and caps with the IDM kernel module. Invoked during
148 149 * a transport driver's attach routine.
149 150 */
150 151 idm_status_t
151 152 idm_transport_register(idm_transport_attr_t *attr)
152 153 {
153 154 ASSERT(attr->it_ops != NULL);
154 155 ASSERT(attr->it_caps != NULL);
155 156
156 157 switch (attr->type) {
157 158 /* All known non-native transports here; for now, iSER */
158 159 case IDM_TRANSPORT_TYPE_ISER:
159 160 idm_transport_list[attr->type].it_ops = attr->it_ops;
160 161 idm_transport_list[attr->type].it_caps = attr->it_caps;
161 162 return (IDM_STATUS_SUCCESS);
162 163
163 164 default:
164 165 cmn_err(CE_NOTE, "idm: unknown transport type (0x%x) in "
165 166 "idm_transport_register", attr->type);
166 167 return (IDM_STATUS_SUCCESS);
167 168 }
168 169 }
169 170
170 171 /*
171 172 * idm_ini_conn_create
172 173 *
173 174 * This function is invoked by the iSCSI layer to create a connection context.
174 175 * This does not actually establish the socket connection.
175 176 *
176 177 * cr - Connection request parameters
177 178 * new_con - Output parameter that contains the new request if successful
178 179 *
179 180 */
180 181 idm_status_t
181 182 idm_ini_conn_create(idm_conn_req_t *cr, idm_conn_t **new_con)
182 183 {
183 184 idm_transport_t *it;
184 185 idm_conn_t *ic;
185 186 int rc;
186 187
187 188 it = idm_transport_lookup(cr);
188 189
189 190 retry:
190 191 ic = idm_conn_create_common(CONN_TYPE_INI, it->it_type,
191 192 &cr->icr_conn_ops);
192 193
193 194 bcopy(&cr->cr_ini_dst_addr, &ic->ic_ini_dst_addr,
194 195 sizeof (cr->cr_ini_dst_addr));
195 196
196 197 /* create the transport-specific connection components */
197 198 rc = it->it_ops->it_ini_conn_create(cr, ic);
198 199 if (rc != IDM_STATUS_SUCCESS) {
199 200 /* cleanup the failed connection */
200 201 idm_conn_destroy_common(ic);
201 202
202 203 /*
203 204 * It is possible for an IB client to connect to
204 205 * an ethernet-only client via an IB-eth gateway.
205 206 * Therefore, if we are attempting to use iSER and
206 207 * fail, retry with sockets before ultimately
207 208 * failing the connection.
208 209 */
209 210 if (it->it_type == IDM_TRANSPORT_TYPE_ISER) {
210 211 it = &idm_transport_list[IDM_TRANSPORT_TYPE_SOCKETS];
211 212 goto retry;
212 213 }
213 214
214 215 return (IDM_STATUS_FAIL);
215 216 }
216 217
217 218 *new_con = ic;
218 219
219 220 mutex_enter(&idm.idm_global_mutex);
220 221 list_insert_tail(&idm.idm_ini_conn_list, ic);
221 222 mutex_exit(&idm.idm_global_mutex);
222 223
223 224 return (IDM_STATUS_SUCCESS);
224 225 }
225 226
226 227 /*
227 228 * idm_ini_conn_destroy
228 229 *
229 230 * Releases any resources associated with the connection. This is the
230 231 * complement to idm_ini_conn_create.
231 232 * ic - idm_conn_t structure representing the relevant connection
232 233 *
233 234 */
234 235 void
235 236 idm_ini_conn_destroy_task(void *ic_void)
236 237 {
237 238 idm_conn_t *ic = ic_void;
238 239
239 240 ic->ic_transport_ops->it_ini_conn_destroy(ic);
240 241 idm_conn_destroy_common(ic);
241 242 }
242 243
243 244 void
244 245 idm_ini_conn_destroy(idm_conn_t *ic)
245 246 {
246 247 /*
247 248 * It's reasonable for the initiator to call idm_ini_conn_destroy
248 249 * from within the context of the CN_CONNECT_DESTROY notification.
249 250 * That's a problem since we want to destroy the taskq for the
250 251 * state machine associated with the connection. Remove the
251 252 * connection from the list right away then handle the remaining
252 253 * work via the idm_global_taskq.
253 254 */
254 255 mutex_enter(&idm.idm_global_mutex);
255 256 list_remove(&idm.idm_ini_conn_list, ic);
256 257 mutex_exit(&idm.idm_global_mutex);
257 258
258 259 if (taskq_dispatch(idm.idm_global_taskq,
259 260 &idm_ini_conn_destroy_task, ic, TQ_SLEEP) == NULL) {
260 261 cmn_err(CE_WARN,
261 262 "idm_ini_conn_destroy: Couldn't dispatch task");
262 263 }
263 264 }
264 265
265 266 /*
266 267 * idm_ini_conn_connect
267 268 *
268 269 * Establish connection to the remote system identified in idm_conn_t.
269 270 * The connection parameters including the remote IP address were established
270 271 * in the call to idm_ini_conn_create. The IDM state machine will
271 272 * perform client notifications as necessary to prompt the initiator through
272 273 * the login process. IDM also keeps a timer running so that if the login
273 274 * process doesn't complete in a timely manner it will fail.
274 275 *
275 276 * ic - idm_conn_t structure representing the relevant connection
276 277 *
277 278 * Returns success if the connection was established, otherwise some kind
278 279 * of meaningful error code.
279 280 *
 280  281  * Upon return the login has either failed or is logging in (FFP)
281 282 */
282 283 idm_status_t
283 284 idm_ini_conn_connect(idm_conn_t *ic)
284 285 {
285 286 idm_status_t rc;
286 287
287 288 rc = idm_conn_sm_init(ic);
288 289 if (rc != IDM_STATUS_SUCCESS) {
289 290 return (ic->ic_conn_sm_status);
290 291 }
291 292
292 293 /* Hold connection until we return */
293 294 idm_conn_hold(ic);
294 295
295 296 /* Kick state machine */
296 297 idm_conn_event(ic, CE_CONNECT_REQ, NULL);
297 298
298 299 /* Wait for login flag */
299 300 mutex_enter(&ic->ic_state_mutex);
300 301 while (!(ic->ic_state_flags & CF_LOGIN_READY) &&
301 302 !(ic->ic_state_flags & CF_ERROR)) {
302 303 cv_wait(&ic->ic_state_cv, &ic->ic_state_mutex);
303 304 }
304 305
305 306 /*
306 307 * The CN_READY_TO_LOGIN and/or the CN_CONNECT_FAIL call to
307 308 * idm_notify_client has already been generated by the idm conn
308 309 * state machine. If connection fails any time after this
309 310 * check, we will detect it in iscsi_login.
310 311 */
311 312 if (ic->ic_state_flags & CF_ERROR) {
312 313 rc = ic->ic_conn_sm_status;
313 314 }
314 315 mutex_exit(&ic->ic_state_mutex);
315 316 idm_conn_rele(ic);
316 317
317 318 return (rc);
318 319 }
319 320
320 321 /*
321 322 * idm_ini_conn_disconnect
322 323 *
323 324 * Forces a connection (previously established using idm_ini_conn_connect)
324 325 * to perform a controlled shutdown, cleaning up any outstanding requests.
325 326 *
326 327 * ic - idm_conn_t structure representing the relevant connection
327 328 *
328 329 * This is asynchronous and will return before the connection is properly
329 330 * shutdown
330 331 */
331 332 /* ARGSUSED */
332 333 void
333 334 idm_ini_conn_disconnect(idm_conn_t *ic)
334 335 {
335 336 idm_conn_event(ic, CE_TRANSPORT_FAIL, NULL);
336 337 }
337 338
338 339 /*
339 340 * idm_ini_conn_disconnect_wait
340 341 *
341 342 * Forces a connection (previously established using idm_ini_conn_connect)
342 343 * to perform a controlled shutdown. Blocks until the connection is
343 344 * disconnected.
344 345 *
345 346 * ic - idm_conn_t structure representing the relevant connection
346 347 */
347 348 /* ARGSUSED */
348 349 void
349 350 idm_ini_conn_disconnect_sync(idm_conn_t *ic)
350 351 {
351 352 mutex_enter(&ic->ic_state_mutex);
352 353 if ((ic->ic_state != CS_S9_INIT_ERROR) &&
353 354 (ic->ic_state != CS_S11_COMPLETE)) {
354 355 idm_conn_event_locked(ic, CE_TRANSPORT_FAIL, NULL, CT_NONE);
355 356 while ((ic->ic_state != CS_S9_INIT_ERROR) &&
356 357 (ic->ic_state != CS_S11_COMPLETE))
357 358 cv_wait(&ic->ic_state_cv, &ic->ic_state_mutex);
358 359 }
359 360 mutex_exit(&ic->ic_state_mutex);
360 361 }
361 362
362 363 /*
363 364 * idm_tgt_svc_create
364 365 *
365 366 * The target calls this service to obtain a service context for each available
366 367 * transport, starting a service of each type related to the IP address and port
367 368 * passed. The idm_svc_req_t contains the service parameters.
368 369 */
369 370 idm_status_t
370 371 idm_tgt_svc_create(idm_svc_req_t *sr, idm_svc_t **new_svc)
371 372 {
372 373 idm_transport_type_t type;
373 374 idm_transport_t *it;
374 375 idm_svc_t *is;
375 376 int rc;
376 377
377 378 *new_svc = NULL;
378 379 is = kmem_zalloc(sizeof (idm_svc_t), KM_SLEEP);
379 380
380 381 /* Initialize transport-agnostic components of the service handle */
381 382 is->is_svc_req = *sr;
382 383 mutex_init(&is->is_mutex, NULL, MUTEX_DEFAULT, NULL);
383 384 cv_init(&is->is_cv, NULL, CV_DEFAULT, NULL);
384 385 mutex_init(&is->is_count_mutex, NULL, MUTEX_DEFAULT, NULL);
385 386 cv_init(&is->is_count_cv, NULL, CV_DEFAULT, NULL);
386 387 idm_refcnt_init(&is->is_refcnt, is);
387 388
388 389 /*
389 390 * Make sure all available transports are setup. We call this now
390 391 * instead of at initialization time in case IB has become available
391 392 * since we started (hotplug, etc).
392 393 */
393 394 idm_transport_setup(sr->sr_li, B_FALSE);
394 395
395 396 /*
396 397 * Loop through the transports, configuring the transport-specific
397 398 * components of each one.
398 399 */
399 400 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
400 401
401 402 it = &idm_transport_list[type];
402 403 /*
403 404 * If it_ops is NULL then the transport is unconfigured
404 405 * and we shouldn't try to start the service.
405 406 */
406 407 if (it->it_ops == NULL) {
407 408 continue;
408 409 }
409 410
410 411 rc = it->it_ops->it_tgt_svc_create(sr, is);
411 412 if (rc != IDM_STATUS_SUCCESS) {
412 413 /* Teardown any configured services */
413 414 while (type--) {
414 415 it = &idm_transport_list[type];
415 416 if (it->it_ops == NULL) {
416 417 continue;
417 418 }
418 419 it->it_ops->it_tgt_svc_destroy(is);
419 420 }
420 421 /* Free the svc context and return */
421 422 kmem_free(is, sizeof (idm_svc_t));
422 423 return (rc);
423 424 }
424 425 }
425 426
426 427 *new_svc = is;
427 428
428 429 mutex_enter(&idm.idm_global_mutex);
429 430 list_insert_tail(&idm.idm_tgt_svc_list, is);
430 431 mutex_exit(&idm.idm_global_mutex);
431 432
432 433 return (IDM_STATUS_SUCCESS);
433 434 }
434 435
435 436 /*
436 437 * idm_tgt_svc_destroy
437 438 *
438 439 * is - idm_svc_t returned by the call to idm_tgt_svc_create
439 440 *
440 441 * Cleanup any resources associated with the idm_svc_t.
441 442 */
442 443 void
443 444 idm_tgt_svc_destroy(idm_svc_t *is)
444 445 {
445 446 idm_transport_type_t type;
446 447 idm_transport_t *it;
447 448
448 449 mutex_enter(&idm.idm_global_mutex);
449 450 /* remove this service from the global list */
450 451 list_remove(&idm.idm_tgt_svc_list, is);
451 452 /* wakeup any waiters for service change */
452 453 cv_broadcast(&idm.idm_tgt_svc_cv);
453 454 mutex_exit(&idm.idm_global_mutex);
454 455
455 456 /* teardown each transport-specific service */
456 457 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
457 458 it = &idm_transport_list[type];
458 459 if (it->it_ops == NULL) {
459 460 continue;
460 461 }
461 462
462 463 it->it_ops->it_tgt_svc_destroy(is);
463 464 }
464 465
465 466 /* tear down the svc resources */
466 467 idm_refcnt_destroy(&is->is_refcnt);
467 468 cv_destroy(&is->is_count_cv);
468 469 mutex_destroy(&is->is_count_mutex);
469 470 cv_destroy(&is->is_cv);
470 471 mutex_destroy(&is->is_mutex);
471 472
472 473 /* free the svc handle */
473 474 kmem_free(is, sizeof (idm_svc_t));
474 475 }
475 476
476 477 void
477 478 idm_tgt_svc_hold(idm_svc_t *is)
478 479 {
479 480 idm_refcnt_hold(&is->is_refcnt);
480 481 }
481 482
482 483 void
483 484 idm_tgt_svc_rele_and_destroy(idm_svc_t *is)
484 485 {
485 486 idm_refcnt_rele_and_destroy(&is->is_refcnt,
486 487 (idm_refcnt_cb_t *)&idm_tgt_svc_destroy);
487 488 }
488 489
489 490 /*
490 491 * idm_tgt_svc_online
491 492 *
492 493 * is - idm_svc_t returned by the call to idm_tgt_svc_create
493 494 *
494 495 * Online each transport service, as we want this target to be accessible
495 496 * via any configured transport.
496 497 *
497 498 * When the initiator establishes a new connection to the target, IDM will
498 499 * call the "new connect" callback defined in the idm_svc_req_t structure
499 500 * and it will pass an idm_conn_t structure representing that new connection.
500 501 */
501 502 idm_status_t
502 503 idm_tgt_svc_online(idm_svc_t *is)
503 504 {
504 505
505 506 idm_transport_type_t type, last_type;
506 507 idm_transport_t *it;
507 508 int rc = IDM_STATUS_SUCCESS;
508 509
509 510 mutex_enter(&is->is_mutex);
510 511 if (is->is_online == 0) {
511 512 /* Walk through each of the transports and online them */
512 513 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
513 514 it = &idm_transport_list[type];
514 515 if (it->it_ops == NULL) {
515 516 /* transport is not registered */
516 517 continue;
517 518 }
518 519
519 520 mutex_exit(&is->is_mutex);
520 521 rc = it->it_ops->it_tgt_svc_online(is);
521 522 mutex_enter(&is->is_mutex);
522 523 if (rc != IDM_STATUS_SUCCESS) {
523 524 last_type = type;
524 525 break;
525 526 }
526 527 }
527 528 if (rc != IDM_STATUS_SUCCESS) {
528 529 /*
529 530 * The last transport failed to online.
530 531 * Offline any transport onlined above and
531 532 * do not online the target.
532 533 */
533 534 for (type = 0; type < last_type; type++) {
534 535 it = &idm_transport_list[type];
535 536 if (it->it_ops == NULL) {
536 537 /* transport is not registered */
537 538 continue;
538 539 }
539 540
540 541 mutex_exit(&is->is_mutex);
541 542 it->it_ops->it_tgt_svc_offline(is);
542 543 mutex_enter(&is->is_mutex);
543 544 }
544 545 } else {
545 546 /* Target service now online */
546 547 is->is_online = 1;
547 548 }
548 549 } else {
549 550 /* Target service already online, just bump the count */
550 551 is->is_online++;
551 552 }
552 553 mutex_exit(&is->is_mutex);
553 554
554 555 return (rc);
555 556 }
556 557
557 558 /*
558 559 * idm_tgt_svc_offline
559 560 *
560 561 * is - idm_svc_t returned by the call to idm_tgt_svc_create
561 562 *
562 563 * Shutdown any online target services.
563 564 */
564 565 void
565 566 idm_tgt_svc_offline(idm_svc_t *is)
566 567 {
567 568 idm_transport_type_t type;
568 569 idm_transport_t *it;
569 570
570 571 mutex_enter(&is->is_mutex);
571 572 is->is_online--;
572 573 if (is->is_online == 0) {
573 574 /* Walk through each of the transports and offline them */
574 575 for (type = 0; type < IDM_TRANSPORT_NUM_TYPES; type++) {
575 576 it = &idm_transport_list[type];
576 577 if (it->it_ops == NULL) {
577 578 /* transport is not registered */
578 579 continue;
579 580 }
580 581
581 582 mutex_exit(&is->is_mutex);
582 583 it->it_ops->it_tgt_svc_offline(is);
583 584 mutex_enter(&is->is_mutex);
584 585 }
585 586 }
586 587 mutex_exit(&is->is_mutex);
587 588 }
588 589
589 590 /*
590 591 * idm_tgt_svc_lookup
591 592 *
592 593 * Lookup a service instance listening on the specified port
593 594 */
594 595
595 596 idm_svc_t *
596 597 idm_tgt_svc_lookup(uint16_t port)
597 598 {
598 599 idm_svc_t *result;
599 600
600 601 retry:
601 602 mutex_enter(&idm.idm_global_mutex);
602 603 for (result = list_head(&idm.idm_tgt_svc_list);
603 604 result != NULL;
604 605 result = list_next(&idm.idm_tgt_svc_list, result)) {
605 606 if (result->is_svc_req.sr_port == port) {
606 607 if (result->is_online == 0) {
607 608 /*
608 609 * A service exists on this port, but it
609 610 * is going away, wait for it to cleanup.
610 611 */
611 612 cv_wait(&idm.idm_tgt_svc_cv,
612 613 &idm.idm_global_mutex);
613 614 mutex_exit(&idm.idm_global_mutex);
614 615 goto retry;
615 616 }
616 617 idm_tgt_svc_hold(result);
617 618 mutex_exit(&idm.idm_global_mutex);
618 619 return (result);
619 620 }
620 621 }
621 622 mutex_exit(&idm.idm_global_mutex);
622 623
623 624 return (NULL);
624 625 }
625 626
626 627 /*
627 628 * idm_negotiate_key_values()
628 629 * Give IDM level a chance to negotiate any login parameters it should own.
629 630 * -- leave unhandled parameters alone on request_nvl
630 631 * -- move all handled parameters to response_nvl with an appropriate response
631 632 * -- also add an entry to negotiated_nvl for any accepted parameters
632 633 */
633 634 kv_status_t
634 635 idm_negotiate_key_values(idm_conn_t *ic, nvlist_t *request_nvl,
635 636 nvlist_t *response_nvl, nvlist_t *negotiated_nvl)
636 637 {
637 638 ASSERT(ic->ic_transport_ops != NULL);
638 639 return (ic->ic_transport_ops->it_negotiate_key_values(ic,
639 640 request_nvl, response_nvl, negotiated_nvl));
640 641 }
641 642
642 643 /*
643 644 * idm_notice_key_values()
644 645 * Activate at the IDM level any parameters that have been negotiated.
645 646 * Passes the set of key value pairs to the transport for activation.
646 647 * This will be invoked as the connection is entering full-feature mode.
647 648 */
648 649 void
649 650 idm_notice_key_values(idm_conn_t *ic, nvlist_t *negotiated_nvl)
650 651 {
651 652 ASSERT(ic->ic_transport_ops != NULL);
652 653 ic->ic_transport_ops->it_notice_key_values(ic, negotiated_nvl);
653 654 }
654 655
655 656 /*
656 657 * idm_declare_key_values()
657 658 * Activate an operational set of declarative parameters from the config_nvl,
658 659 * and return the selected values in the outgoing_nvl.
659 660 */
660 661 kv_status_t
661 662 idm_declare_key_values(idm_conn_t *ic, nvlist_t *config_nvl,
662 663 nvlist_t *outgoing_nvl)
663 664 {
664 665 ASSERT(ic->ic_transport_ops != NULL);
665 666 return (ic->ic_transport_ops->it_declare_key_values(ic, config_nvl,
666 667 outgoing_nvl));
667 668 }
668 669
669 670 /*
670 671 * idm_buf_tx_to_ini
671 672 *
672 673 * This is IDM's implementation of the 'Put_Data' operational primitive.
673 674 *
674 675 * This function is invoked by a target iSCSI layer to request its local
675 676 * Datamover layer to transmit the Data-In PDU to the peer iSCSI layer
676 677 * on the remote iSCSI node. The I/O buffer represented by 'idb' is
677 678 * transferred to the initiator associated with task 'idt'. The connection
678 679 * info, contents of the Data-In PDU header, the DataDescriptorIn, BHS,
679 680 * and the callback (idb->idb_buf_cb) at transfer completion are
680 681 * provided as input.
681 682 *
682 683 * This data transfer takes place transparently to the remote iSCSI layer,
683 684 * i.e. without its participation.
684 685 *
685 686 * Using sockets, IDM implements the data transfer by segmenting the data
686 687 * buffer into appropriately sized iSCSI PDUs and transmitting them to the
687 688 * initiator. iSER performs the transfer using RDMA write.
688 689 *
689 690 */
690 691 idm_status_t
691 692 idm_buf_tx_to_ini(idm_task_t *idt, idm_buf_t *idb,
692 693 uint32_t offset, uint32_t xfer_len,
693 694 idm_buf_cb_t idb_buf_cb, void *cb_arg)
694 695 {
695 696 idm_status_t rc;
696 697
697 698 idb->idb_bufoffset = offset;
698 699 idb->idb_xfer_len = xfer_len;
699 700 idb->idb_buf_cb = idb_buf_cb;
700 701 idb->idb_cb_arg = cb_arg;
701 702 gethrestime(&idb->idb_xfer_start);
702 703
703 704 /*
704 705 * Buffer should not contain the pattern. If the pattern is
705 706 * present then we've been asked to transmit initialized data
706 707 */
707 708 IDM_BUFPAT_CHECK(idb, xfer_len, BP_CHECK_ASSERT);
708 709
709 710 mutex_enter(&idt->idt_mutex);
710 711 switch (idt->idt_state) {
711 712 case TASK_ACTIVE:
712 713 idt->idt_tx_to_ini_start++;
713 714 idm_task_hold(idt);
714 715 idm_buf_bind_in_locked(idt, idb);
715 716 idb->idb_in_transport = B_TRUE;
716 717 rc = (*idt->idt_ic->ic_transport_ops->it_buf_tx_to_ini)
717 718 (idt, idb);
718 719 return (rc);
719 720
720 721 case TASK_SUSPENDING:
721 722 case TASK_SUSPENDED:
722 723 /*
723 724 * Bind buffer but don't start a transfer since the task
724 725 * is suspended
725 726 */
726 727 idm_buf_bind_in_locked(idt, idb);
727 728 mutex_exit(&idt->idt_mutex);
728 729 return (IDM_STATUS_SUCCESS);
729 730
730 731 case TASK_ABORTING:
731 732 case TASK_ABORTED:
732 733 /*
733 734 * Once the task is aborted, any buffers added to the
734 735 * idt_inbufv will never get cleaned up, so just return
735 736 * SUCCESS. The buffer should get cleaned up by the
736 737 * client or framework once task_aborted has completed.
737 738 */
738 739 mutex_exit(&idt->idt_mutex);
739 740 return (IDM_STATUS_SUCCESS);
740 741
741 742 default:
742 743 ASSERT(0);
743 744 break;
744 745 }
745 746 mutex_exit(&idt->idt_mutex);
746 747
747 748 return (IDM_STATUS_FAIL);
748 749 }
749 750
750 751 /*
751 752 * idm_buf_rx_from_ini
752 753 *
753 754 * This is IDM's implementation of the 'Get_Data' operational primitive.
754 755 *
755 756 * This function is invoked by a target iSCSI layer to request its local
756 757 * Datamover layer to retrieve certain data identified by the R2T PDU from the
757 758 * peer iSCSI layer on the remote node. The retrieved Data-Out PDU will be
758 759 * mapped to the respective buffer by the task tags (ITT & TTT).
759 760 * The connection information, contents of an R2T PDU, DataDescriptor, BHS, and
760 761 * the callback (idb->idb_buf_cb) notification for data transfer completion are
761 762 * are provided as input.
762 763 *
 763  764  * When an iSCSI node sends an R2T PDU to its local Datamover layer, the
 764  765  * local and remote Datamover layers transparently bring
765 766 * about the data transfer requested by the R2T PDU, without the participation
766 767 * of the iSCSI layers.
767 768 *
768 769 * Using sockets, IDM transmits an R2T PDU for each buffer and the rx_data_out()
769 770 * assembles the Data-Out PDUs into the buffer. iSER uses RDMA read.
770 771 *
771 772 */
772 773 idm_status_t
773 774 idm_buf_rx_from_ini(idm_task_t *idt, idm_buf_t *idb,
774 775 uint32_t offset, uint32_t xfer_len,
775 776 idm_buf_cb_t idb_buf_cb, void *cb_arg)
776 777 {
777 778 idm_status_t rc;
778 779
779 780 idb->idb_bufoffset = offset;
780 781 idb->idb_xfer_len = xfer_len;
781 782 idb->idb_buf_cb = idb_buf_cb;
782 783 idb->idb_cb_arg = cb_arg;
783 784 gethrestime(&idb->idb_xfer_start);
784 785
785 786 /*
786 787 * "In" buf list is for "Data In" PDU's, "Out" buf list is for
787 788 * "Data Out" PDU's
788 789 */
789 790 mutex_enter(&idt->idt_mutex);
790 791 switch (idt->idt_state) {
791 792 case TASK_ACTIVE:
792 793 idt->idt_rx_from_ini_start++;
793 794 idm_task_hold(idt);
794 795 idm_buf_bind_out_locked(idt, idb);
795 796 idb->idb_in_transport = B_TRUE;
796 797 rc = (*idt->idt_ic->ic_transport_ops->it_buf_rx_from_ini)
797 798 (idt, idb);
798 799 return (rc);
799 800 case TASK_SUSPENDING:
800 801 case TASK_SUSPENDED:
801 802 case TASK_ABORTING:
802 803 case TASK_ABORTED:
803 804 /*
804 805 * Bind buffer but don't start a transfer since the task
805 806 * is suspended
806 807 */
807 808 idm_buf_bind_out_locked(idt, idb);
808 809 mutex_exit(&idt->idt_mutex);
809 810 return (IDM_STATUS_SUCCESS);
810 811 default:
811 812 ASSERT(0);
812 813 break;
813 814 }
814 815 mutex_exit(&idt->idt_mutex);
815 816
816 817 return (IDM_STATUS_FAIL);
817 818 }
818 819
819 820 /*
820 821 * idm_buf_tx_to_ini_done
821 822 *
822 823 * The transport calls this after it has completed a transfer requested by
823 824 * a call to transport_buf_tx_to_ini
824 825 *
825 826 * Caller holds idt->idt_mutex, idt->idt_mutex is released before returning.
826 827 * idt may be freed after the call to idb->idb_buf_cb.
827 828 */
828 829 void
829 830 idm_buf_tx_to_ini_done(idm_task_t *idt, idm_buf_t *idb, idm_status_t status)
830 831 {
831 832 ASSERT(mutex_owned(&idt->idt_mutex));
832 833 idb->idb_in_transport = B_FALSE;
833 834 idb->idb_tx_thread = B_FALSE;
834 835 idt->idt_tx_to_ini_done++;
835 836 gethrestime(&idb->idb_xfer_done);
836 837
837 838 /*
838 839 * idm_refcnt_rele may cause TASK_SUSPENDING --> TASK_SUSPENDED or
839 840 * TASK_ABORTING --> TASK_ABORTED transistion if the refcount goes
840 841 * to 0.
841 842 */
842 843 idm_task_rele(idt);
843 844 idb->idb_status = status;
844 845
845 846 switch (idt->idt_state) {
846 847 case TASK_ACTIVE:
847 848 idt->idt_ic->ic_timestamp = ddi_get_lbolt();
848 849 idm_buf_unbind_in_locked(idt, idb);
849 850 mutex_exit(&idt->idt_mutex);
850 851 (*idb->idb_buf_cb)(idb, status);
851 852 return;
852 853 case TASK_SUSPENDING:
853 854 case TASK_SUSPENDED:
854 855 case TASK_ABORTING:
855 856 case TASK_ABORTED:
856 857 /*
857 858 * To keep things simple we will ignore the case where the
858 859 * transfer was successful and leave all buffers bound to the
859 860 * task. This allows us to also ignore the case where we've
860 861 * been asked to abort a task but the last transfer of the
861 862 * task has completed. IDM has no idea whether this was, in
862 863 * fact, the last transfer of the task so it would be difficult
863 864 * to handle this case. Everything should get sorted out again
864 865 * after task reassignment is complete.
865 866 *
866 867 * In the case of TASK_ABORTING we could conceivably call the
867 868 * buffer callback here but the timing of when the client's
868 869 * client_task_aborted callback is invoked vs. when the client's
869 870 * buffer callback gets invoked gets sticky. We don't want
870 871 * the client to here from us again after the call to
871 872 * client_task_aborted() but we don't want to give it a bunch
872 873 * of failed buffer transfers until we've called
873 874 * client_task_aborted(). Instead we'll just leave all the
874 875 * buffers bound and allow the client to cleanup.
875 876 */
876 877 break;
877 878 default:
878 879 ASSERT(0);
879 880 }
880 881 mutex_exit(&idt->idt_mutex);
881 882 }
882 883
883 884 /*
884 885 * idm_buf_rx_from_ini_done
885 886 *
886 887 * The transport calls this after it has completed a transfer requested by
 887  888  * a call to transport_buf_rx_from_ini
888 889 *
889 890 * Caller holds idt->idt_mutex, idt->idt_mutex is released before returning.
890 891 * idt may be freed after the call to idb->idb_buf_cb.
891 892 */
892 893 void
893 894 idm_buf_rx_from_ini_done(idm_task_t *idt, idm_buf_t *idb, idm_status_t status)
894 895 {
895 896 ASSERT(mutex_owned(&idt->idt_mutex));
896 897 idb->idb_in_transport = B_FALSE;
897 898 idt->idt_rx_from_ini_done++;
898 899 gethrestime(&idb->idb_xfer_done);
899 900
900 901 /*
901 902 * idm_refcnt_rele may cause TASK_SUSPENDING --> TASK_SUSPENDED or
902 903 * TASK_ABORTING --> TASK_ABORTED transistion if the refcount goes
903 904 * to 0.
904 905 */
905 906 idm_task_rele(idt);
906 907 idb->idb_status = status;
907 908
908 909 if (status == IDM_STATUS_SUCCESS) {
909 910 /*
910 911 * Buffer should not contain the pattern. If it does then
911 912 * we did not get the data from the remote host.
912 913 */
913 914 IDM_BUFPAT_CHECK(idb, idb->idb_xfer_len, BP_CHECK_ASSERT);
914 915 }
915 916
916 917 switch (idt->idt_state) {
917 918 case TASK_ACTIVE:
918 919 idt->idt_ic->ic_timestamp = ddi_get_lbolt();
919 920 idm_buf_unbind_out_locked(idt, idb);
920 921 mutex_exit(&idt->idt_mutex);
921 922 (*idb->idb_buf_cb)(idb, status);
922 923 return;
923 924 case TASK_SUSPENDING:
924 925 case TASK_SUSPENDED:
925 926 case TASK_ABORTING:
926 927 case TASK_ABORTED:
927 928 /*
928 929 * To keep things simple we will ignore the case where the
929 930 * transfer was successful and leave all buffers bound to the
930 931 * task. This allows us to also ignore the case where we've
931 932 * been asked to abort a task but the last transfer of the
932 933 * task has completed. IDM has no idea whether this was, in
933 934 * fact, the last transfer of the task so it would be difficult
934 935 * to handle this case. Everything should get sorted out again
935 936 * after task reassignment is complete.
936 937 *
937 938 * In the case of TASK_ABORTING we could conceivably call the
938 939 * buffer callback here but the timing of when the client's
939 940 * client_task_aborted callback is invoked vs. when the client's
940 941 * buffer callback gets invoked gets sticky. We don't want
941 942 	 * the client to hear from us again after the call to
942 943 * client_task_aborted() but we don't want to give it a bunch
943 944 * of failed buffer transfers until we've called
944 945 * client_task_aborted(). Instead we'll just leave all the
945 946 * buffers bound and allow the client to cleanup.
946 947 */
947 948 break;
948 949 default:
949 950 ASSERT(0);
950 951 }
951 952 mutex_exit(&idt->idt_mutex);
952 953 }
953 954
954 955 /*
955 956 * idm_buf_alloc
956 957 *
957 958 * Allocates a buffer handle and registers it for use with the transport
958 959 * layer. If a buffer is not passed on bufptr, the buffer will be allocated
959 960 * as well as the handle.
960 961 *
961 962 * ic - connection on which the buffer will be transferred
962 963 * bufptr - allocate memory for buffer if NULL, else assign to buffer
963 964 * buflen - length of buffer
964 965 *
965 966 * Returns idm_buf_t handle if successful, otherwise NULL
966 967 */
967 968 idm_buf_t *
968 969 idm_buf_alloc(idm_conn_t *ic, void *bufptr, uint64_t buflen)
969 970 {
970 971 idm_buf_t *buf = NULL;
971 972 int rc;
972 973
973 974 ASSERT(ic != NULL);
974 975 ASSERT(idm.idm_buf_cache != NULL);
975 976 ASSERT(buflen > 0);
976 977
977 978 /* Don't allocate new buffers if we are not in FFP */
978 979 mutex_enter(&ic->ic_state_mutex);
979 980 if (!ic->ic_ffp) {
980 981 mutex_exit(&ic->ic_state_mutex);
981 982 return (NULL);
982 983 }
983 984
984 985
985 986 idm_conn_hold(ic);
986 987 mutex_exit(&ic->ic_state_mutex);
987 988
988 989 buf = kmem_cache_alloc(idm.idm_buf_cache, KM_NOSLEEP);
989 990 if (buf == NULL) {
990 991 idm_conn_rele(ic);
991 992 return (NULL);
992 993 }
993 994
994 995 buf->idb_ic = ic;
995 996 buf->idb_buflen = buflen;
996 997 buf->idb_exp_offset = 0;
997 998 buf->idb_bufoffset = 0;
998 999 buf->idb_xfer_len = 0;
999 1000 buf->idb_magic = IDM_BUF_MAGIC;
1000 1001 buf->idb_in_transport = B_FALSE;
1001 1002 buf->idb_bufbcopy = B_FALSE;
1002 1003
1003 1004 /*
1004 1005 * If bufptr is NULL, we have an implicit request to allocate
1005 1006 * memory for this IDM buffer handle and register it for use
1006 1007 * with the transport. To simplify this, and to give more freedom
1007 1008 	 * to the transport layer for its own buffer management, both of
1008 1009 * these actions will take place in the transport layer.
1009 1010 * If bufptr is set, then the caller has allocated memory (or more
1010 1011 * likely it's been passed from an upper layer), and we need only
1011 1012 * register the buffer for use with the transport layer.
1012 1013 */
1013 1014 if (bufptr == NULL) {
1014 1015 /*
1015 1016 * Allocate a buffer from the transport layer (which
1016 1017 * will also register the buffer for use).
1017 1018 */
1018 1019 rc = ic->ic_transport_ops->it_buf_alloc(buf, buflen);
1019 1020 if (rc != 0) {
1020 1021 idm_conn_rele(ic);
1021 1022 kmem_cache_free(idm.idm_buf_cache, buf);
1022 1023 return (NULL);
1023 1024 }
1024 1025 /* Set the bufalloc'd flag */
1025 1026 buf->idb_bufalloc = B_TRUE;
1026 1027 } else {
1027 1028 /*
1028 1029 		 * For large transfers, set the passed bufptr into
1029 1030 * the buf handle, and register the handle with the
1030 1031 * transport layer. As memory registration with the
1031 1032 * transport layer is a time/cpu intensive operation,
1032 1033 * for small transfers (up to a pre-defined bcopy
1033 1034 * threshold), use pre-registered memory buffers
1034 1035 * and bcopy data at the appropriate time.
1035 1036 */
1036 1037 buf->idb_buf = bufptr;
1037 1038
1038 1039 rc = ic->ic_transport_ops->it_buf_setup(buf);
1039 1040 if (rc != 0) {
1040 1041 idm_conn_rele(ic);
1041 1042 kmem_cache_free(idm.idm_buf_cache, buf);
1042 1043 return (NULL);
1043 1044 }
1044 1045 /*
1045 1046 * The transport layer is now expected to set the idb_bufalloc
1046 1047 * correctly to indicate if resources have been allocated.
1047 1048 */
1048 1049 }
1049 1050
1050 1051 IDM_BUFPAT_SET(buf);
1051 1052
1052 1053 return (buf);
1053 1054 }
1054 1055
1055 1056 /*
1056 1057 * idm_buf_free
1057 1058 *
1058 1059 * Release a buffer handle along with the associated buffer that was allocated
1059 1060 * or assigned with idm_buf_alloc
1060 1061 */
1061 1062 void
1062 1063 idm_buf_free(idm_buf_t *buf)
1063 1064 {
1064 1065 idm_conn_t *ic = buf->idb_ic;
1065 1066
1066 1067
1067 1068 buf->idb_task_binding = NULL;
1068 1069
1069 1070 if (buf->idb_bufalloc) {
1070 1071 ic->ic_transport_ops->it_buf_free(buf);
1071 1072 } else {
1072 1073 ic->ic_transport_ops->it_buf_teardown(buf);
1073 1074 }
1074 1075 kmem_cache_free(idm.idm_buf_cache, buf);
1075 1076 idm_conn_rele(ic);
1076 1077 }
1077 1078
1078 1079 /*
1079 1080 * idm_buf_bind_in
1080 1081 *
1081 1082 * This function associates a buffer with a task. This is only for use by the
1082 1083 * iSCSI initiator that will have only one buffer per transfer direction
1083 1084 *
1084 1085 */
1085 1086 void
1086 1087 idm_buf_bind_in(idm_task_t *idt, idm_buf_t *buf)
1087 1088 {
1088 1089 mutex_enter(&idt->idt_mutex);
1089 1090 idm_buf_bind_in_locked(idt, buf);
1090 1091 mutex_exit(&idt->idt_mutex);
1091 1092 }
1092 1093
1093 1094 static void
1094 1095 idm_buf_bind_in_locked(idm_task_t *idt, idm_buf_t *buf)
1095 1096 {
1096 1097 buf->idb_task_binding = idt;
1097 1098 buf->idb_ic = idt->idt_ic;
1098 1099 idm_listbuf_insert(&idt->idt_inbufv, buf);
1099 1100 }
1100 1101
1101 1102 void
1102 1103 idm_buf_bind_out(idm_task_t *idt, idm_buf_t *buf)
1103 1104 {
1104 1105 /*
1105 1106 * For small transfers, the iSER transport delegates the IDM
1106 1107 * layer to bcopy the SCSI Write data for faster IOPS.
1107 1108 */
1108 1109 if (buf->idb_bufbcopy == B_TRUE) {
1109 1110
1110 1111 bcopy(buf->idb_bufptr, buf->idb_buf, buf->idb_buflen);
1111 1112 }
1112 1113 mutex_enter(&idt->idt_mutex);
1113 1114 idm_buf_bind_out_locked(idt, buf);
1114 1115 mutex_exit(&idt->idt_mutex);
1115 1116 }
1116 1117
1117 1118 static void
1118 1119 idm_buf_bind_out_locked(idm_task_t *idt, idm_buf_t *buf)
1119 1120 {
1120 1121 buf->idb_task_binding = idt;
1121 1122 buf->idb_ic = idt->idt_ic;
1122 1123 idm_listbuf_insert(&idt->idt_outbufv, buf);
1123 1124 }
1124 1125
1125 1126 void
1126 1127 idm_buf_unbind_in(idm_task_t *idt, idm_buf_t *buf)
1127 1128 {
1128 1129 /*
1129 1130 * For small transfers, the iSER transport delegates the IDM
1130 1131 	 * layer to bcopy the SCSI Read data into the read buffer
1131 1132 * for faster IOPS.
1132 1133 */
1133 1134 if (buf->idb_bufbcopy == B_TRUE) {
1134 1135 bcopy(buf->idb_buf, buf->idb_bufptr, buf->idb_buflen);
1135 1136 }
1136 1137 mutex_enter(&idt->idt_mutex);
1137 1138 idm_buf_unbind_in_locked(idt, buf);
1138 1139 mutex_exit(&idt->idt_mutex);
1139 1140 }
1140 1141
1141 1142 static void
1142 1143 idm_buf_unbind_in_locked(idm_task_t *idt, idm_buf_t *buf)
1143 1144 {
1144 1145 list_remove(&idt->idt_inbufv, buf);
1145 1146 }
1146 1147
1147 1148 void
1148 1149 idm_buf_unbind_out(idm_task_t *idt, idm_buf_t *buf)
1149 1150 {
1150 1151 mutex_enter(&idt->idt_mutex);
1151 1152 idm_buf_unbind_out_locked(idt, buf);
1152 1153 mutex_exit(&idt->idt_mutex);
1153 1154 }
1154 1155
1155 1156 static void
1156 1157 idm_buf_unbind_out_locked(idm_task_t *idt, idm_buf_t *buf)
1157 1158 {
1158 1159 list_remove(&idt->idt_outbufv, buf);
1159 1160 }
1160 1161
1161 1162 /*
1162 1163 * idm_buf_find() will lookup the idm_buf_t based on the relative offset in the
1163 1164 * iSCSI PDU
1164 1165 */
1165 1166 idm_buf_t *
1166 1167 idm_buf_find(void *lbuf, size_t data_offset)
1167 1168 {
1168 1169 idm_buf_t *idb;
1169 1170 list_t *lst = (list_t *)lbuf;
1170 1171
1171 1172 /* iterate through the list to find the buffer */
1172 1173 for (idb = list_head(lst); idb != NULL; idb = list_next(lst, idb)) {
1173 1174
1174 1175 ASSERT((idb->idb_ic->ic_conn_type == CONN_TYPE_TGT) ||
1175 1176 (idb->idb_bufoffset == 0));
1176 1177
1177 1178 if ((data_offset >= idb->idb_bufoffset) &&
1178 1179 (data_offset < (idb->idb_bufoffset + idb->idb_buflen))) {
1179 1180
1180 1181 return (idb);
1181 1182 }
1182 1183 }
1183 1184
1184 1185 return (NULL);
1185 1186 }
1186 1187
1187 1188 void
1188 1189 idm_bufpat_set(idm_buf_t *idb)
1189 1190 {
1190 1191 idm_bufpat_t *bufpat;
1191 1192 int len, i;
1192 1193
1193 1194 len = idb->idb_buflen;
1194 1195 len = (len / sizeof (idm_bufpat_t)) * sizeof (idm_bufpat_t);
1195 1196
1196 1197 bufpat = idb->idb_buf;
1197 1198 for (i = 0; i < len; i += sizeof (idm_bufpat_t)) {
1198 1199 bufpat->bufpat_idb = idb;
1199 1200 bufpat->bufpat_bufmagic = IDM_BUF_MAGIC;
1200 1201 bufpat->bufpat_offset = i;
1201 1202 bufpat++;
1202 1203 }
1203 1204 }
1204 1205
1205 1206 boolean_t
1206 1207 idm_bufpat_check(idm_buf_t *idb, int check_len, idm_bufpat_check_type_t type)
1207 1208 {
1208 1209 idm_bufpat_t *bufpat;
1209 1210 int len, i;
1210 1211
1211 1212 len = (type == BP_CHECK_QUICK) ? sizeof (idm_bufpat_t) : check_len;
1212 1213 len = (len / sizeof (idm_bufpat_t)) * sizeof (idm_bufpat_t);
1213 1214 ASSERT(len <= idb->idb_buflen);
1214 1215 bufpat = idb->idb_buf;
1215 1216
1216 1217 /*
1217 1218 * Don't check the pattern in buffers that came from outside IDM
1218 1219 * (these will be buffers from the initiator that we opted not
1219 1220 * to double-buffer)
1220 1221 */
1221 1222 if (!idb->idb_bufalloc)
1222 1223 return (B_FALSE);
1223 1224
1224 1225 /*
1225 1226 * Return true if we find the pattern anywhere in the buffer
1226 1227 */
1227 1228 for (i = 0; i < len; i += sizeof (idm_bufpat_t)) {
1228 1229 if (BUFPAT_MATCH(bufpat, idb)) {
1229 1230 IDM_CONN_LOG(CE_WARN, "idm_bufpat_check found: "
1230 1231 "idb %p bufpat %p "
1231 1232 "bufpat_idb=%p bufmagic=%08x offset=%08x",
1232 1233 (void *)idb, (void *)bufpat, bufpat->bufpat_idb,
1233 1234 bufpat->bufpat_bufmagic, bufpat->bufpat_offset);
1234 1235 DTRACE_PROBE2(bufpat__pattern__found,
1235 1236 idm_buf_t *, idb, idm_bufpat_t *, bufpat);
1236 1237 if (type == BP_CHECK_ASSERT) {
1237 1238 ASSERT(0);
1238 1239 }
1239 1240 return (B_TRUE);
1240 1241 }
1241 1242 bufpat++;
1242 1243 }
1243 1244
1244 1245 return (B_FALSE);
1245 1246 }
1246 1247
1247 1248 /*
1248 1249 * idm_task_alloc
1249 1250 *
1250 1251 * This function will allocate a idm_task_t structure. A task tag is also
1251 1252 * generated and saved in idt_tt. The task is not active.
1252 1253 */
1253 1254 idm_task_t *
1254 1255 idm_task_alloc(idm_conn_t *ic)
1255 1256 {
1256 1257 idm_task_t *idt;
1257 1258
1258 1259 ASSERT(ic != NULL);
1259 1260
1260 1261 /* Don't allocate new tasks if we are not in FFP */
1261 1262 if (!ic->ic_ffp) {
1262 1263 return (NULL);
1263 1264 }
1264 1265 idt = kmem_cache_alloc(idm.idm_task_cache, KM_NOSLEEP);
1265 1266 if (idt == NULL) {
1266 1267 return (NULL);
1267 1268 }
1268 1269
1269 1270 ASSERT(list_is_empty(&idt->idt_inbufv));
1270 1271 ASSERT(list_is_empty(&idt->idt_outbufv));
1271 1272
1272 1273 mutex_enter(&ic->ic_state_mutex);
1273 1274 if (!ic->ic_ffp) {
1274 1275 mutex_exit(&ic->ic_state_mutex);
1275 1276 kmem_cache_free(idm.idm_task_cache, idt);
1276 1277 return (NULL);
1277 1278 }
1278 1279 idm_conn_hold(ic);
1279 1280 mutex_exit(&ic->ic_state_mutex);
1280 1281
1281 1282 idt->idt_state = TASK_IDLE;
1282 1283 idt->idt_ic = ic;
1283 1284 idt->idt_private = NULL;
1284 1285 idt->idt_exp_datasn = 0;
1285 1286 idt->idt_exp_rttsn = 0;
1286 1287 idt->idt_flags = 0;
1287 1288 return (idt);
1288 1289 }
1289 1290
1290 1291 /*
1291 1292 * idm_task_start
1292 1293 *
1293 1294 * Mark the task active and initialize some stats. The caller
1294 1295 * sets up the idm_task_t structure with a prior call to idm_task_alloc().
1295 1296 * The task service does not function as a task/work engine, it is the
1296 1297 * responsibility of the initiator to start the data transfer and free the
1297 1298 * resources.
1298 1299 */
1299 1300 void
1300 1301 idm_task_start(idm_task_t *idt, uintptr_t handle)
1301 1302 {
1302 1303 ASSERT(idt != NULL);
1303 1304
1304 1305 /* mark the task as ACTIVE */
1305 1306 idt->idt_state = TASK_ACTIVE;
1306 1307 idt->idt_client_handle = handle;
1307 1308 idt->idt_tx_to_ini_start = idt->idt_tx_to_ini_done =
1308 1309 idt->idt_rx_from_ini_start = idt->idt_rx_from_ini_done =
1309 1310 idt->idt_tx_bytes = idt->idt_rx_bytes = 0;
1310 1311 }
1311 1312
1312 1313 /*
1313 1314 * idm_task_done
1314 1315 *
1315 1316 * This function sets the state to indicate that the task is no longer active.
1316 1317 */
1317 1318 void
1318 1319 idm_task_done(idm_task_t *idt)
1319 1320 {
1320 1321 ASSERT(idt != NULL);
1321 1322
1322 1323 mutex_enter(&idt->idt_mutex);
1323 1324 idt->idt_state = TASK_IDLE;
1324 1325 mutex_exit(&idt->idt_mutex);
1325 1326
1326 1327 /*
1327 1328 * Although unlikely it is possible for a reference to come in after
1328 1329 * the client has decided the task is over but before we've marked
1329 1330 * the task idle. One specific unavoidable scenario is the case where
1330 1331 	 * a received PDU with the matching ITT/TTT results in a successful
1331 1332 * lookup of this task. We are at the mercy of the remote node in
1332 1333 * that case so we need to handle it. Now that the task state
1333 1334 * has changed no more references will occur so a simple call to
1334 1335 * idm_refcnt_wait_ref should deal with the situation.
1335 1336 */
1336 1337 idm_refcnt_wait_ref(&idt->idt_refcnt);
1337 1338 idm_refcnt_reset(&idt->idt_refcnt);
1338 1339 }
1339 1340
1340 1341 /*
1341 1342 * idm_task_free
1342 1343 *
1343 1344 * This function will free the Task Tag and the memory allocated for the task
1344 1345 * idm_task_done should be called prior to this call
1345 1346 */
1346 1347 void
1347 1348 idm_task_free(idm_task_t *idt)
1348 1349 {
1349 1350 idm_conn_t *ic;
1350 1351
1351 1352 ASSERT(idt != NULL);
1352 1353 ASSERT(idt->idt_refcnt.ir_refcnt == 0);
1353 1354 ASSERT(idt->idt_state == TASK_IDLE);
1354 1355
1355 1356 ic = idt->idt_ic;
1356 1357
1357 1358 /*
1358 1359 * It's possible for items to still be in the idt_inbufv list if
1359 1360 * they were added after idm_free_task_rsrc was called. We rely on
1360 1361 * STMF to free all buffers associated with the task however STMF
1361 1362 * doesn't know that we have this reference to the buffers.
1362 1363 * Use list_create so that we don't end up with stale references
1363 1364 * to these buffers.
1364 1365 */
1365 1366 list_create(&idt->idt_inbufv, sizeof (idm_buf_t),
1366 1367 offsetof(idm_buf_t, idb_buflink));
1367 1368 list_create(&idt->idt_outbufv, sizeof (idm_buf_t),
1368 1369 offsetof(idm_buf_t, idb_buflink));
1369 1370
1370 1371 kmem_cache_free(idm.idm_task_cache, idt);
1371 1372
1372 1373 idm_conn_rele(ic);
1373 1374 }
1374 1375
1375 1376 /*
1376 1377 * idm_task_find_common
1377 1378 * common code for idm_task_find() and idm_task_find_and_complete()
1378 1379 */
1379 1380 /*ARGSUSED*/
1380 1381 static idm_task_t *
1381 1382 idm_task_find_common(idm_conn_t *ic, uint32_t itt, uint32_t ttt,
1382 1383 boolean_t complete)
1383 1384 {
1384 1385 uint32_t tt, client_handle;
1385 1386 idm_task_t *idt;
1386 1387
1387 1388 /*
1388 1389 * Must match both itt and ttt. The table is indexed by itt
1389 1390 * for initiator connections and ttt for target connections.
1390 1391 */
1391 1392 if (IDM_CONN_ISTGT(ic)) {
1392 1393 tt = ttt;
1393 1394 client_handle = itt;
1394 1395 } else {
1395 1396 tt = itt;
1396 1397 client_handle = ttt;
1397 1398 }
1398 1399
1399 1400 rw_enter(&idm.idm_taskid_table_lock, RW_READER);
1400 1401 if (tt >= idm.idm_taskid_max) {
1401 1402 rw_exit(&idm.idm_taskid_table_lock);
1402 1403 return (NULL);
1403 1404 }
1404 1405
1405 1406 idt = idm.idm_taskid_table[tt];
1406 1407
1407 1408 if (idt != NULL) {
1408 1409 mutex_enter(&idt->idt_mutex);
1409 1410 if ((idt->idt_state != TASK_ACTIVE) ||
1410 1411 (idt->idt_ic != ic) ||
1411 1412 (IDM_CONN_ISTGT(ic) &&
1412 1413 (idt->idt_client_handle != client_handle))) {
1413 1414 /*
1414 1415 * Task doesn't match or task is aborting and
1415 1416 * we don't want any more references.
1416 1417 */
1417 1418 if ((idt->idt_ic != ic) &&
1418 1419 (idt->idt_state == TASK_ACTIVE) &&
1419 1420 (IDM_CONN_ISINI(ic) || idt->idt_client_handle ==
1420 1421 client_handle)) {
1421 1422 IDM_CONN_LOG(CE_WARN,
1422 1423 "idm_task_find: wrong connection %p != %p",
1423 1424 (void *)ic, (void *)idt->idt_ic);
1424 1425 }
1425 1426 mutex_exit(&idt->idt_mutex);
1426 1427 rw_exit(&idm.idm_taskid_table_lock);
1427 1428 return (NULL);
1428 1429 }
1429 1430 idm_task_hold(idt);
1430 1431 /*
1431 1432 * Set the task state to TASK_COMPLETE so it can no longer
1432 1433 * be found or aborted.
1433 1434 */
1434 1435 if (B_TRUE == complete)
1435 1436 idt->idt_state = TASK_COMPLETE;
1436 1437 mutex_exit(&idt->idt_mutex);
1437 1438 }
1438 1439 rw_exit(&idm.idm_taskid_table_lock);
1439 1440
1440 1441 return (idt);
1441 1442 }
1442 1443
1443 1444 /*
1444 1445 * This function looks up a task by task tag.
1445 1446 */
1446 1447 idm_task_t *
1447 1448 idm_task_find(idm_conn_t *ic, uint32_t itt, uint32_t ttt)
1448 1449 {
1449 1450 return (idm_task_find_common(ic, itt, ttt, B_FALSE));
1450 1451 }
1451 1452
1452 1453 /*
1453 1454 * This function looks up a task by task tag. If found, the task state
1454 1455  * is atomically set to TASK_COMPLETE so it can no longer be found or aborted.
1455 1456 */
1456 1457 idm_task_t *
1457 1458 idm_task_find_and_complete(idm_conn_t *ic, uint32_t itt, uint32_t ttt)
1458 1459 {
1459 1460 return (idm_task_find_common(ic, itt, ttt, B_TRUE));
1460 1461 }
1461 1462
1462 1463 /*
1463 1464 * idm_task_find_by_handle
1464 1465 *
1465 1466 * This function looks up a task by the client-private idt_client_handle.
1466 1467 *
1467 1468 * This function should NEVER be called in the performance path. It is
1468 1469 * intended strictly for error recovery/task management.
1469 1470 */
1470 1471 /*ARGSUSED*/
1471 1472 void *
1472 1473 idm_task_find_by_handle(idm_conn_t *ic, uintptr_t handle)
1473 1474 {
1474 1475 idm_task_t *idt = NULL;
1475 1476 int idx = 0;
1476 1477
1477 1478 rw_enter(&idm.idm_taskid_table_lock, RW_READER);
1478 1479
1479 1480 for (idx = 0; idx < idm.idm_taskid_max; idx++) {
1480 1481 idt = idm.idm_taskid_table[idx];
1481 1482
1482 1483 if (idt == NULL)
1483 1484 continue;
1484 1485
1485 1486 mutex_enter(&idt->idt_mutex);
1486 1487
1487 1488 if (idt->idt_state != TASK_ACTIVE) {
1488 1489 /*
1489 1490 * Task is either in suspend, abort, or already
1490 1491 * complete.
1491 1492 */
1492 1493 mutex_exit(&idt->idt_mutex);
1493 1494 continue;
1494 1495 }
1495 1496
1496 1497 if (idt->idt_client_handle == handle) {
1497 1498 idm_task_hold(idt);
1498 1499 mutex_exit(&idt->idt_mutex);
1499 1500 break;
1500 1501 }
1501 1502
1502 1503 mutex_exit(&idt->idt_mutex);
1503 1504 }
1504 1505
1505 1506 rw_exit(&idm.idm_taskid_table_lock);
1506 1507
1507 1508 if ((idt == NULL) || (idx == idm.idm_taskid_max))
1508 1509 return (NULL);
1509 1510
1510 1511 return (idt->idt_private);
1511 1512 }
1512 1513
1513 1514 void
1514 1515 idm_task_hold(idm_task_t *idt)
|
↓ open down ↓ |
1440 lines elided |
↑ open up ↑ |
1515 1516 {
1516 1517 idm_refcnt_hold(&idt->idt_refcnt);
1517 1518 }
1518 1519
1519 1520 void
1520 1521 idm_task_rele(idm_task_t *idt)
1521 1522 {
1522 1523 idm_refcnt_rele(&idt->idt_refcnt);
1523 1524 }
1524 1525
1525 -void
1526 +stmf_status_t
1526 1527 idm_task_abort(idm_conn_t *ic, idm_task_t *idt, idm_abort_type_t abort_type)
1527 1528 {
1528 1529 idm_task_t *task;
1529 1530 int idx;
1531 + stmf_status_t s = STMF_SUCCESS;
1530 1532
1531 1533 /*
1532 1534 * Passing NULL as the task indicates that all tasks
1533 1535 * for this connection should be aborted.
1534 1536 */
1535 1537 if (idt == NULL) {
1536 1538 /*
1537 1539 * Only the connection state machine should ask for
1538 1540 * all tasks to abort and this should never happen in FFP.
1539 1541 */
1540 1542 ASSERT(!ic->ic_ffp);
|
↓ open down ↓ |
1 lines elided |
↑ open up ↑ |
1541 1543 rw_enter(&idm.idm_taskid_table_lock, RW_READER);
1542 1544 for (idx = 0; idx < idm.idm_taskid_max; idx++) {
1543 1545 task = idm.idm_taskid_table[idx];
1544 1546 if (task == NULL)
1545 1547 continue;
1546 1548 mutex_enter(&task->idt_mutex);
1547 1549 if ((task->idt_state != TASK_IDLE) &&
1548 1550 (task->idt_state != TASK_COMPLETE) &&
1549 1551 (task->idt_ic == ic)) {
1550 1552 rw_exit(&idm.idm_taskid_table_lock);
1551 - idm_task_abort_one(ic, task, abort_type);
1553 + s = idm_task_abort_one(ic, task, abort_type);
1552 1554 rw_enter(&idm.idm_taskid_table_lock, RW_READER);
1553 1555 } else
1554 1556 mutex_exit(&task->idt_mutex);
1555 1557 }
1556 1558 rw_exit(&idm.idm_taskid_table_lock);
1557 1559 } else {
1558 1560 mutex_enter(&idt->idt_mutex);
1559 - idm_task_abort_one(ic, idt, abort_type);
1561 + s = idm_task_abort_one(ic, idt, abort_type);
1560 1562 }
1563 + return (s);
1561 1564 }
1562 1565
1563 1566 static void
1564 1567 idm_task_abort_unref_cb(void *ref)
1565 1568 {
1566 1569 idm_task_t *idt = ref;
1567 1570
1568 1571 mutex_enter(&idt->idt_mutex);
1569 1572 switch (idt->idt_state) {
1570 1573 case TASK_SUSPENDING:
1571 1574 idt->idt_state = TASK_SUSPENDED;
1572 1575 mutex_exit(&idt->idt_mutex);
1573 1576 idm_task_aborted(idt, IDM_STATUS_SUSPENDED);
1574 1577 return;
1575 1578 case TASK_ABORTING:
1576 1579 idt->idt_state = TASK_ABORTED;
1577 1580 mutex_exit(&idt->idt_mutex);
1578 1581 idm_task_aborted(idt, IDM_STATUS_ABORTED);
1579 1582 return;
1580 1583 default:
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
1581 1584 mutex_exit(&idt->idt_mutex);
1582 1585 ASSERT(0);
1583 1586 break;
1584 1587 }
1585 1588 }
1586 1589
1587 1590 /*
1588 1591 * Abort the idm task.
1589 1592 * Caller must hold the task mutex, which will be released before return
1590 1593 */
1591 -static void
1594 +static stmf_status_t
1592 1595 idm_task_abort_one(idm_conn_t *ic, idm_task_t *idt, idm_abort_type_t abort_type)
1593 1596 {
1597 + stmf_status_t s = STMF_SUCCESS;
1598 +
1594 1599 	/* Caller must hold the task mutex */
1595 1600 ASSERT(mutex_owned(&idt->idt_mutex));
1596 1601 switch (idt->idt_state) {
1597 1602 case TASK_ACTIVE:
1598 1603 switch (abort_type) {
1599 1604 case AT_INTERNAL_SUSPEND:
1600 1605 /* Call transport to release any resources */
1601 1606 idt->idt_state = TASK_SUSPENDING;
1602 1607 mutex_exit(&idt->idt_mutex);
1603 1608 ic->ic_transport_ops->it_free_task_rsrc(idt);
1604 1609
1605 1610 /*
1606 1611 * Wait for outstanding references. When all
1607 1612 * references are released the callback will call
1608 1613 * idm_task_aborted().
1609 1614 */
1610 1615 idm_refcnt_async_wait_ref(&idt->idt_refcnt,
1611 1616 &idm_task_abort_unref_cb);
1612 - return;
1617 + return (s);
1613 1618 case AT_INTERNAL_ABORT:
1614 1619 case AT_TASK_MGMT_ABORT:
1615 1620 idt->idt_state = TASK_ABORTING;
1616 1621 mutex_exit(&idt->idt_mutex);
1617 1622 ic->ic_transport_ops->it_free_task_rsrc(idt);
1618 1623
1619 1624 /*
1620 1625 * Wait for outstanding references. When all
1621 1626 * references are released the callback will call
1622 1627 * idm_task_aborted().
1623 1628 */
1624 1629 idm_refcnt_async_wait_ref(&idt->idt_refcnt,
1625 1630 &idm_task_abort_unref_cb);
1626 - return;
1631 + return (s);
1627 1632 default:
1628 1633 ASSERT(0);
1629 1634 }
1630 1635 break;
1631 1636 case TASK_SUSPENDING:
1632 1637 /* Already called transport_free_task_rsrc(); */
1633 1638 switch (abort_type) {
1634 1639 case AT_INTERNAL_SUSPEND:
1635 1640 /* Already doing it */
1636 1641 break;
1637 1642 case AT_INTERNAL_ABORT:
1638 1643 case AT_TASK_MGMT_ABORT:
1639 1644 idt->idt_state = TASK_ABORTING;
1640 1645 break;
1641 1646 default:
1642 1647 ASSERT(0);
1643 1648 }
1644 1649 break;
1645 1650 case TASK_SUSPENDED:
1646 1651 /* Already called transport_free_task_rsrc(); */
1647 1652 switch (abort_type) {
1648 1653 case AT_INTERNAL_SUSPEND:
1649 1654 /* Already doing it */
1650 1655 break;
1651 1656 case AT_INTERNAL_ABORT:
1652 1657 case AT_TASK_MGMT_ABORT:
1653 1658 idt->idt_state = TASK_ABORTING;
1654 1659 mutex_exit(&idt->idt_mutex);
1655 1660
|
↓ open down ↓ |
19 lines elided |
↑ open up ↑ |
1656 1661 /*
1657 1662 * We could probably call idm_task_aborted directly
1658 1663 * here but we may be holding the conn lock. It's
1659 1664 * easier to just switch contexts. Even though
1660 1665 * we shouldn't really have any references we'll
1661 1666 * set the state to TASK_ABORTING instead of
1662 1667 * TASK_ABORTED so we can use the same code path.
1663 1668 */
1664 1669 idm_refcnt_async_wait_ref(&idt->idt_refcnt,
1665 1670 &idm_task_abort_unref_cb);
1666 - return;
1671 + return (s);
1667 1672 default:
1668 1673 ASSERT(0);
1669 1674 }
1670 1675 break;
1671 1676 case TASK_ABORTING:
1672 1677 case TASK_ABORTED:
1673 1678 switch (abort_type) {
1674 1679 case AT_INTERNAL_SUSPEND:
1675 1680 /* We're already past this point... */
1676 1681 case AT_INTERNAL_ABORT:
1677 1682 case AT_TASK_MGMT_ABORT:
1678 1683 /* Already doing it */
1679 1684 break;
1680 1685 default:
1681 1686 ASSERT(0);
1682 1687 }
1683 1688 break;
1684 1689 case TASK_COMPLETE:
1685 - /*
1686 - * In this case, let it go. The status has already been
1687 - * sent (which may or may not get successfully transmitted)
1688 - * and we don't want to end up in a race between completing
1689 - * the status PDU and marking the task suspended.
1690 - */
1690 + idm_refcnt_wait_ref(&idt->idt_refcnt);
1691 + s = STMF_ABORT_SUCCESS;
1691 1692 break;
1692 1693 default:
1693 1694 ASSERT(0);
1694 1695 }
1695 1696 mutex_exit(&idt->idt_mutex);
1697 +
1698 + return (s);
1696 1699 }
1697 1700
1698 1701 static void
1699 1702 idm_task_aborted(idm_task_t *idt, idm_status_t status)
1700 1703 {
1701 1704 (*idt->idt_ic->ic_conn_ops.icb_task_aborted)(idt, status);
1702 1705 }
1703 1706
1704 1707 /*
1705 1708 * idm_pdu_tx
1706 1709 *
1707 1710 * This is IDM's implementation of the 'Send_Control' operational primitive.
1708 1711 * This function is invoked by an initiator iSCSI layer requesting the transfer
1709 1712 * of a iSCSI command PDU or a target iSCSI layer requesting the transfer of a
1710 1713 * iSCSI response PDU. The PDU will be transmitted as-is by the local Datamover
1711 1714 * layer to the peer iSCSI layer in the remote iSCSI node. The connection info
1712 1715 * and iSCSI PDU-specific qualifiers namely BHS, AHS, DataDescriptor and Size
1713 1716 * are provided as input.
1714 1717 *
1715 1718 */
1716 1719 void
1717 1720 idm_pdu_tx(idm_pdu_t *pdu)
1718 1721 {
1719 1722 idm_conn_t *ic = pdu->isp_ic;
1720 1723 iscsi_async_evt_hdr_t *async_evt;
1721 1724
1722 1725 /*
1723 1726 * If we are in full-featured mode then route SCSI-related
1724 1727 * commands to the appropriate function vector without checking
1725 1728 * the connection state. We will only be in full-feature mode
1726 1729 * when we are in an acceptable state for SCSI PDU's.
1727 1730 *
1728 1731 * We also need to ensure that there are no PDU events outstanding
1729 1732 * on the state machine. Any non-SCSI PDU's received in full-feature
1730 1733 * mode will result in PDU events and until these have been handled
1731 1734 * we need to route all PDU's through the state machine as PDU
1732 1735 * events to maintain ordering.
1733 1736 *
1734 1737 * Note that IDM cannot enter FFP mode until it processes in
1735 1738 * its state machine the last xmit of the login process.
1736 1739 * Hence, checking the IDM_PDU_LOGIN_TX flag here would be
1737 1740 * superfluous.
1738 1741 */
1739 1742 mutex_enter(&ic->ic_state_mutex);
1740 1743 if (ic->ic_ffp && (ic->ic_pdu_events == 0)) {
1741 1744 mutex_exit(&ic->ic_state_mutex);
1742 1745 switch (IDM_PDU_OPCODE(pdu)) {
1743 1746 case ISCSI_OP_SCSI_RSP:
1744 1747 /* Target only */
1745 1748 DTRACE_ISCSI_2(scsi__response, idm_conn_t *, ic,
1746 1749 iscsi_scsi_rsp_hdr_t *,
1747 1750 (iscsi_scsi_rsp_hdr_t *)pdu->isp_hdr);
1748 1751 idm_pdu_tx_forward(ic, pdu);
1749 1752 return;
1750 1753 case ISCSI_OP_SCSI_TASK_MGT_RSP:
1751 1754 /* Target only */
1752 1755 DTRACE_ISCSI_2(task__response, idm_conn_t *, ic,
1753 1756 iscsi_text_rsp_hdr_t *,
1754 1757 (iscsi_text_rsp_hdr_t *)pdu->isp_hdr);
1755 1758 idm_pdu_tx_forward(ic, pdu);
1756 1759 return;
1757 1760 case ISCSI_OP_SCSI_DATA_RSP:
1758 1761 /* Target only */
1759 1762 DTRACE_ISCSI_2(data__send, idm_conn_t *, ic,
1760 1763 iscsi_data_rsp_hdr_t *,
1761 1764 (iscsi_data_rsp_hdr_t *)pdu->isp_hdr);
1762 1765 idm_pdu_tx_forward(ic, pdu);
1763 1766 return;
1764 1767 case ISCSI_OP_RTT_RSP:
1765 1768 /* Target only */
1766 1769 DTRACE_ISCSI_2(data__request, idm_conn_t *, ic,
1767 1770 iscsi_rtt_hdr_t *,
1768 1771 (iscsi_rtt_hdr_t *)pdu->isp_hdr);
1769 1772 idm_pdu_tx_forward(ic, pdu);
1770 1773 return;
1771 1774 case ISCSI_OP_NOOP_IN:
1772 1775 /* Target only */
1773 1776 DTRACE_ISCSI_2(nop__send, idm_conn_t *, ic,
1774 1777 iscsi_nop_in_hdr_t *,
1775 1778 (iscsi_nop_in_hdr_t *)pdu->isp_hdr);
1776 1779 idm_pdu_tx_forward(ic, pdu);
1777 1780 return;
1778 1781 case ISCSI_OP_TEXT_RSP:
1779 1782 /* Target only */
1780 1783 DTRACE_ISCSI_2(text__response, idm_conn_t *, ic,
1781 1784 iscsi_text_rsp_hdr_t *,
1782 1785 (iscsi_text_rsp_hdr_t *)pdu->isp_hdr);
1783 1786 idm_pdu_tx_forward(ic, pdu);
1784 1787 return;
1785 1788 case ISCSI_OP_TEXT_CMD:
1786 1789 case ISCSI_OP_NOOP_OUT:
1787 1790 case ISCSI_OP_SCSI_CMD:
1788 1791 case ISCSI_OP_SCSI_DATA:
1789 1792 case ISCSI_OP_SCSI_TASK_MGT_MSG:
1790 1793 /* Initiator only */
1791 1794 idm_pdu_tx_forward(ic, pdu);
1792 1795 return;
1793 1796 default:
1794 1797 break;
1795 1798 }
1796 1799
1797 1800 mutex_enter(&ic->ic_state_mutex);
1798 1801 }
1799 1802
1800 1803 /*
1801 1804 * Any PDU's processed outside of full-feature mode and non-SCSI
1802 1805 * PDU's in full-feature mode are handled by generating an
1803 1806 * event to the connection state machine. The state machine
1804 1807 * will validate the PDU against the current state and either
1805 1808 * transmit the PDU if the opcode is allowed or handle an
1806 1809 * error if the PDU is not allowed.
1807 1810 *
1808 1811 * This code-path will also generate any events that are implied
1809 1812 * by the PDU opcode. For example a "login response" with success
1810 1813 * status generates a CE_LOGOUT_SUCCESS_SND event.
1811 1814 */
1812 1815 switch (IDM_PDU_OPCODE(pdu)) {
1813 1816 case ISCSI_OP_LOGIN_CMD:
1814 1817 idm_conn_tx_pdu_event(ic, CE_LOGIN_SND, (uintptr_t)pdu);
1815 1818 break;
1816 1819 case ISCSI_OP_LOGIN_RSP:
1817 1820 DTRACE_ISCSI_2(login__response, idm_conn_t *, ic,
1818 1821 iscsi_login_rsp_hdr_t *,
1819 1822 (iscsi_login_rsp_hdr_t *)pdu->isp_hdr);
1820 1823 idm_parse_login_rsp(ic, pdu, /* Is RX */ B_FALSE);
1821 1824 break;
1822 1825 case ISCSI_OP_LOGOUT_CMD:
1823 1826 idm_parse_logout_req(ic, pdu, /* Is RX */ B_FALSE);
1824 1827 break;
1825 1828 case ISCSI_OP_LOGOUT_RSP:
1826 1829 DTRACE_ISCSI_2(logout__response, idm_conn_t *, ic,
1827 1830 iscsi_logout_rsp_hdr_t *,
1828 1831 (iscsi_logout_rsp_hdr_t *)pdu->isp_hdr);
1829 1832 idm_parse_logout_rsp(ic, pdu, /* Is RX */ B_FALSE);
1830 1833 break;
1831 1834 case ISCSI_OP_ASYNC_EVENT:
1832 1835 DTRACE_ISCSI_2(async__send, idm_conn_t *, ic,
1833 1836 iscsi_async_evt_hdr_t *,
1834 1837 (iscsi_async_evt_hdr_t *)pdu->isp_hdr);
1835 1838 async_evt = (iscsi_async_evt_hdr_t *)pdu->isp_hdr;
1836 1839 switch (async_evt->async_event) {
1837 1840 case ISCSI_ASYNC_EVENT_REQUEST_LOGOUT:
1838 1841 idm_conn_tx_pdu_event(ic, CE_ASYNC_LOGOUT_SND,
1839 1842 (uintptr_t)pdu);
1840 1843 break;
1841 1844 case ISCSI_ASYNC_EVENT_DROPPING_CONNECTION:
1842 1845 idm_conn_tx_pdu_event(ic, CE_ASYNC_DROP_CONN_SND,
1843 1846 (uintptr_t)pdu);
1844 1847 break;
1845 1848 case ISCSI_ASYNC_EVENT_DROPPING_ALL_CONNECTIONS:
1846 1849 idm_conn_tx_pdu_event(ic, CE_ASYNC_DROP_ALL_CONN_SND,
1847 1850 (uintptr_t)pdu);
1848 1851 break;
1849 1852 case ISCSI_ASYNC_EVENT_SCSI_EVENT:
1850 1853 case ISCSI_ASYNC_EVENT_PARAM_NEGOTIATION:
1851 1854 default:
1852 1855 idm_conn_tx_pdu_event(ic, CE_MISC_TX,
1853 1856 (uintptr_t)pdu);
1854 1857 break;
1855 1858 }
1856 1859 break;
1857 1860 case ISCSI_OP_SCSI_RSP:
1858 1861 /* Target only */
1859 1862 DTRACE_ISCSI_2(scsi__response, idm_conn_t *, ic,
1860 1863 iscsi_scsi_rsp_hdr_t *,
1861 1864 (iscsi_scsi_rsp_hdr_t *)pdu->isp_hdr);
1862 1865 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1863 1866 break;
1864 1867 case ISCSI_OP_SCSI_TASK_MGT_RSP:
1865 1868 /* Target only */
1866 1869 DTRACE_ISCSI_2(task__response, idm_conn_t *, ic,
1867 1870 iscsi_scsi_task_mgt_rsp_hdr_t *,
1868 1871 (iscsi_scsi_task_mgt_rsp_hdr_t *)pdu->isp_hdr);
1869 1872 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1870 1873 break;
1871 1874 case ISCSI_OP_SCSI_DATA_RSP:
1872 1875 /* Target only */
1873 1876 DTRACE_ISCSI_2(data__send, idm_conn_t *, ic,
1874 1877 iscsi_data_rsp_hdr_t *,
1875 1878 (iscsi_data_rsp_hdr_t *)pdu->isp_hdr);
1876 1879 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1877 1880 break;
1878 1881 case ISCSI_OP_RTT_RSP:
1879 1882 /* Target only */
1880 1883 DTRACE_ISCSI_2(data__request, idm_conn_t *, ic,
1881 1884 iscsi_rtt_hdr_t *,
1882 1885 (iscsi_rtt_hdr_t *)pdu->isp_hdr);
1883 1886 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1884 1887 break;
1885 1888 case ISCSI_OP_NOOP_IN:
1886 1889 /* Target only */
1887 1890 DTRACE_ISCSI_2(nop__send, idm_conn_t *, ic,
1888 1891 iscsi_nop_in_hdr_t *,
1889 1892 (iscsi_nop_in_hdr_t *)pdu->isp_hdr);
1890 1893 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1891 1894 break;
1892 1895 case ISCSI_OP_TEXT_RSP:
1893 1896 /* Target only */
1894 1897 DTRACE_ISCSI_2(text__response, idm_conn_t *, ic,
1895 1898 iscsi_text_rsp_hdr_t *,
1896 1899 (iscsi_text_rsp_hdr_t *)pdu->isp_hdr);
1897 1900 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1898 1901 break;
1899 1902 /* Initiator only */
1900 1903 case ISCSI_OP_SCSI_CMD:
1901 1904 case ISCSI_OP_SCSI_TASK_MGT_MSG:
1902 1905 case ISCSI_OP_SCSI_DATA:
1903 1906 case ISCSI_OP_NOOP_OUT:
1904 1907 case ISCSI_OP_TEXT_CMD:
1905 1908 case ISCSI_OP_SNACK_CMD:
1906 1909 case ISCSI_OP_REJECT_MSG:
1907 1910 default:
1908 1911 /*
1909 1912 * Connection state machine will validate these PDU's against
1910 1913 * the current state. A PDU not allowed in the current
1911 1914 * state will cause a protocol error.
1912 1915 */
1913 1916 idm_conn_tx_pdu_event(ic, CE_MISC_TX, (uintptr_t)pdu);
1914 1917 break;
1915 1918 }
1916 1919 mutex_exit(&ic->ic_state_mutex);
1917 1920 }
1918 1921
1919 1922 /*
1920 1923 * Common allocation of a PDU along with memory for header and data.
1921 1924 */
1922 1925 static idm_pdu_t *
1923 1926 idm_pdu_alloc_common(uint_t hdrlen, uint_t datalen, int sleepflag)
1924 1927 {
1925 1928 idm_pdu_t *result;
1926 1929
1927 1930 /*
1928 1931 * IDM clients should cache these structures for performance
1929 1932 * critical paths. We can't cache effectively in IDM because we
1930 1933 * don't know the correct header and data size.
1931 1934 *
1932 1935 * Valid header length is assumed to be hdrlen and valid data
1933 1936 * length is assumed to be datalen. isp_hdrlen and isp_datalen
1934 1937 * can be adjusted after the PDU is returned if necessary.
1935 1938 */
1936 1939 result = kmem_zalloc(sizeof (idm_pdu_t) + hdrlen + datalen, sleepflag);
1937 1940 if (result != NULL) {
1938 1941 /* For idm_pdu_free sanity check */
1939 1942 result->isp_flags |= IDM_PDU_ALLOC;
1940 1943 /* pointer arithmetic */
1941 1944 result->isp_hdr = (iscsi_hdr_t *)(result + 1);
1942 1945 result->isp_hdrlen = hdrlen;
1943 1946 result->isp_hdrbuflen = hdrlen;
1944 1947 result->isp_transport_hdrlen = 0;
1945 1948 if (datalen != 0)
1946 1949 result->isp_data = (uint8_t *)result->isp_hdr + hdrlen;
1947 1950 result->isp_datalen = datalen;
1948 1951 result->isp_databuflen = datalen;
1949 1952 result->isp_magic = IDM_PDU_MAGIC;
1950 1953 }
1951 1954
1952 1955 return (result);
1953 1956 }
1954 1957
1955 1958 /*
1956 1959 * Typical idm_pdu_alloc invocation, will block for resources.
1957 1960 */
1958 1961 idm_pdu_t *
1959 1962 idm_pdu_alloc(uint_t hdrlen, uint_t datalen)
1960 1963 {
1961 1964 return (idm_pdu_alloc_common(hdrlen, datalen, KM_SLEEP));
1962 1965 }
1963 1966
1964 1967 /*
1965 1968 * Non-blocking idm_pdu_alloc implementation, returns NULL if resources
1966 1969 * are not available. Needed for transport-layer allocations which may
1967 1970 * be invoking in interrupt context.
1968 1971 */
1969 1972 idm_pdu_t *
1970 1973 idm_pdu_alloc_nosleep(uint_t hdrlen, uint_t datalen)
1971 1974 {
1972 1975 return (idm_pdu_alloc_common(hdrlen, datalen, KM_NOSLEEP));
1973 1976 }
1974 1977
1975 1978 /*
1976 1979 * Free a PDU previously allocated with idm_pdu_alloc() including any
1977 1980 * header and data space allocated as part of the original request.
1978 1981 * Additional memory regions referenced by subsequent modification of
1979 1982 * the isp_hdr and/or isp_data fields will not be freed.
1980 1983 */
1981 1984 void
1982 1985 idm_pdu_free(idm_pdu_t *pdu)
1983 1986 {
1984 1987 /* Make sure the structure was allocated using idm_pdu_alloc() */
1985 1988 ASSERT(pdu->isp_flags & IDM_PDU_ALLOC);
1986 1989 kmem_free(pdu,
1987 1990 sizeof (idm_pdu_t) + pdu->isp_hdrbuflen + pdu->isp_databuflen);
1988 1991 }
1989 1992
1990 1993 /*
1991 1994 * Initialize the connection, private and callback fields in a PDU.
1992 1995 */
1993 1996 void
1994 1997 idm_pdu_init(idm_pdu_t *pdu, idm_conn_t *ic, void *private, idm_pdu_cb_t *cb)
1995 1998 {
1996 1999 /*
1997 2000 * idm_pdu_complete() will call idm_pdu_free if the callback is
1998 2001 * NULL. This will only work if the PDU was originally allocated
1999 2002 * with idm_pdu_alloc().
2000 2003 */
2001 2004 ASSERT((pdu->isp_flags & IDM_PDU_ALLOC) ||
2002 2005 (cb != NULL));
2003 2006 pdu->isp_magic = IDM_PDU_MAGIC;
2004 2007 pdu->isp_ic = ic;
2005 2008 pdu->isp_private = private;
2006 2009 pdu->isp_callback = cb;
2007 2010 }
2008 2011
2009 2012 /*
2010 2013 * Initialize the header and header length field. This function should
2011 2014 * not be used to adjust the header length in a buffer allocated via
2012 2015  * idm_pdu_alloc since it overwrites the existing header pointer.
2013 2016 */
2014 2017 void
2015 2018 idm_pdu_init_hdr(idm_pdu_t *pdu, uint8_t *hdr, uint_t hdrlen)
2016 2019 {
2017 2020 pdu->isp_hdr = (iscsi_hdr_t *)((void *)hdr);
2018 2021 pdu->isp_hdrlen = hdrlen;
2019 2022 }
2020 2023
2021 2024 /*
2022 2025 * Initialize the data and data length fields. This function should
2023 2026 * not be used to adjust the data length of a buffer allocated via
2024 2027 * idm_pdu_alloc since it overwrites the existing data pointer.
2025 2028 */
2026 2029 void
2027 2030 idm_pdu_init_data(idm_pdu_t *pdu, uint8_t *data, uint_t datalen)
2028 2031 {
2029 2032 pdu->isp_data = data;
2030 2033 pdu->isp_datalen = datalen;
2031 2034 }
2032 2035
2033 2036 void
2034 2037 idm_pdu_complete(idm_pdu_t *pdu, idm_status_t status)
2035 2038 {
2036 2039 if (pdu->isp_callback) {
2037 2040 pdu->isp_status = status;
2038 2041 (*pdu->isp_callback)(pdu, status);
2039 2042 } else {
2040 2043 idm_pdu_free(pdu);
2041 2044 }
2042 2045 }
2043 2046
2044 2047 /*
2045 2048 * State machine auditing
2046 2049 */
2047 2050
2048 2051 void
2049 2052 idm_sm_audit_init(sm_audit_buf_t *audit_buf)
2050 2053 {
2051 2054 bzero(audit_buf, sizeof (sm_audit_buf_t));
2052 2055 audit_buf->sab_max_index = SM_AUDIT_BUF_MAX_REC - 1;
2053 2056 }
2054 2057
2055 2058 static
2056 2059 sm_audit_record_t *
2057 2060 idm_sm_audit_common(sm_audit_buf_t *audit_buf, sm_audit_record_type_t r_type,
2058 2061 sm_audit_sm_type_t sm_type,
2059 2062 int current_state)
2060 2063 {
2061 2064 sm_audit_record_t *sar;
2062 2065
2063 2066 sar = audit_buf->sab_records;
2064 2067 sar += audit_buf->sab_index;
2065 2068 audit_buf->sab_index++;
2066 2069 audit_buf->sab_index &= audit_buf->sab_max_index;
2067 2070
2068 2071 sar->sar_type = r_type;
2069 2072 gethrestime(&sar->sar_timestamp);
2070 2073 sar->sar_sm_type = sm_type;
2071 2074 sar->sar_state = current_state;
2072 2075
2073 2076 return (sar);
2074 2077 }
2075 2078
2076 2079 void
2077 2080 idm_sm_audit_event(sm_audit_buf_t *audit_buf,
2078 2081 sm_audit_sm_type_t sm_type, int current_state,
2079 2082 int event, uintptr_t event_info)
2080 2083 {
2081 2084 sm_audit_record_t *sar;
2082 2085
2083 2086 sar = idm_sm_audit_common(audit_buf, SAR_STATE_EVENT,
2084 2087 sm_type, current_state);
2085 2088 sar->sar_event = event;
2086 2089 sar->sar_event_info = event_info;
2087 2090 }
2088 2091
2089 2092 void
2090 2093 idm_sm_audit_state_change(sm_audit_buf_t *audit_buf,
2091 2094 sm_audit_sm_type_t sm_type, int current_state, int new_state)
2092 2095 {
2093 2096 sm_audit_record_t *sar;
2094 2097
2095 2098 sar = idm_sm_audit_common(audit_buf, SAR_STATE_CHANGE,
2096 2099 sm_type, current_state);
2097 2100 sar->sar_new_state = new_state;
2098 2101 }
2099 2102
2100 2103
2101 2104 /*
2102 2105 * Object reference tracking
2103 2106 */
2104 2107
2105 2108 void
2106 2109 idm_refcnt_init(idm_refcnt_t *refcnt, void *referenced_obj)
2107 2110 {
2108 2111 bzero(refcnt, sizeof (*refcnt));
2109 2112 idm_refcnt_reset(refcnt);
2110 2113 refcnt->ir_referenced_obj = referenced_obj;
2111 2114 bzero(&refcnt->ir_audit_buf, sizeof (refcnt_audit_buf_t));
2112 2115 refcnt->ir_audit_buf.anb_max_index = REFCNT_AUDIT_BUF_MAX_REC - 1;
2113 2116 mutex_init(&refcnt->ir_mutex, NULL, MUTEX_DEFAULT, NULL);
2114 2117 cv_init(&refcnt->ir_cv, NULL, CV_DEFAULT, NULL);
2115 2118 }
2116 2119
void
idm_refcnt_destroy(idm_refcnt_t *refcnt)
{
	/*
	 * Grab the mutex to ensure there are no other lingering threads
	 * holding the mutex before we destroy it (e.g. idm_refcnt_rele
	 * just after the refcnt goes to zero if ir_waiting ==
	 * REF_WAIT_ASYNC).
	 */
	mutex_enter(&refcnt->ir_mutex);
	ASSERT(refcnt->ir_refcnt == 0);	/* all holds must be released first */
	cv_destroy(&refcnt->ir_cv);
	/*
	 * NOTE(review): the mutex is destroyed while still held by this
	 * thread — presumably intentional to close the race described
	 * above; confirm against mutex_destroy(9F) semantics.
	 */
	mutex_destroy(&refcnt->ir_mutex);
}
2130 2133
2131 2134 void
2132 2135 idm_refcnt_reset(idm_refcnt_t *refcnt)
2133 2136 {
2134 2137 refcnt->ir_waiting = REF_NOWAIT;
2135 2138 refcnt->ir_refcnt = 0;
2136 2139 }
2137 2140
2138 2141 void
2139 2142 idm_refcnt_hold(idm_refcnt_t *refcnt)
2140 2143 {
2141 2144 /*
2142 2145 * Nothing should take a hold on an object after a call to
2143 2146 * idm_refcnt_wait_ref or idm_refcnd_async_wait_ref
2144 2147 */
2145 2148 ASSERT(refcnt->ir_waiting == REF_NOWAIT);
2146 2149
2147 2150 mutex_enter(&refcnt->ir_mutex);
|
↓ open down ↓ |
442 lines elided |
↑ open up ↑ |
2148 2151 refcnt->ir_refcnt++;
2149 2152 REFCNT_AUDIT(refcnt);
2150 2153 mutex_exit(&refcnt->ir_mutex);
2151 2154 }
2152 2155
/*
 * Taskq worker dispatched when a refcnt with an asynchronous waiter
 * drains to zero; records the event and invokes the client's callback
 * on the referenced object.
 */
static void
idm_refcnt_unref_task(void *refcnt_void)
{
	idm_refcnt_t *refcnt = refcnt_void;

	/* REFCNT_AUDIT must be called with ir_mutex held (NEX-20098) */
	mutex_enter(&refcnt->ir_mutex);
	REFCNT_AUDIT(refcnt);
	mutex_exit(&refcnt->ir_mutex);
	/* Callback runs without the mutex; it may destroy the refcnt */
	(*refcnt->ir_cb)(refcnt->ir_referenced_obj);
}
2161 2166
/*
 * Release a hold taken with idm_refcnt_hold().  When the count drains
 * to zero and a waiter is registered, wake it: a REF_WAIT_SYNC waiter
 * is signalled on ir_cv, a REF_WAIT_ASYNC waiter gets its callback
 * dispatched on the global taskq (never invoked synchronously here,
 * since ir_mutex is still held).
 */
void
idm_refcnt_rele(idm_refcnt_t *refcnt)
{
	mutex_enter(&refcnt->ir_mutex);
	ASSERT(refcnt->ir_refcnt > 0);
	refcnt->ir_refcnt--;
	REFCNT_AUDIT(refcnt);
	if (refcnt->ir_waiting == REF_NOWAIT) {
		/* No one is waiting on this object */
		mutex_exit(&refcnt->ir_mutex);
		return;
	}

	/*
	 * Someone is waiting for this object to go idle so check if
	 * refcnt is 0.  Waiting on an object then later grabbing another
	 * reference is not allowed so we don't need to handle that case.
	 */
	if (refcnt->ir_refcnt == 0) {
		if (refcnt->ir_waiting == REF_WAIT_ASYNC) {
			if (taskq_dispatch(idm.idm_global_taskq,
			    &idm_refcnt_unref_task, refcnt, TQ_SLEEP) == NULL) {
				cmn_err(CE_WARN,
				    "idm_refcnt_rele: Couldn't dispatch task");
			}
		} else if (refcnt->ir_waiting == REF_WAIT_SYNC) {
			cv_signal(&refcnt->ir_cv);
		}
	}
	mutex_exit(&refcnt->ir_mutex);
}
2193 2198
2194 2199 void
2195 2200 idm_refcnt_rele_and_destroy(idm_refcnt_t *refcnt, idm_refcnt_cb_t *cb_func)
2196 2201 {
2197 2202 mutex_enter(&refcnt->ir_mutex);
2198 2203 ASSERT(refcnt->ir_refcnt > 0);
2199 2204 refcnt->ir_refcnt--;
2200 2205 REFCNT_AUDIT(refcnt);
2201 2206
2202 2207 /*
2203 2208 * Someone is waiting for this object to go idle so check if
2204 2209 * refcnt is 0. Waiting on an object then later grabbing another
2205 2210 * reference is not allowed so we don't need to handle that case.
2206 2211 */
2207 2212 if (refcnt->ir_refcnt == 0) {
2208 2213 refcnt->ir_cb = cb_func;
2209 2214 refcnt->ir_waiting = REF_WAIT_ASYNC;
2210 2215 if (taskq_dispatch(idm.idm_global_taskq,
2211 2216 &idm_refcnt_unref_task, refcnt, TQ_SLEEP) == NULL) {
2212 2217 cmn_err(CE_WARN,
2213 2218 "idm_refcnt_rele: Couldn't dispatch task");
2214 2219 }
2215 2220 }
2216 2221 mutex_exit(&refcnt->ir_mutex);
2217 2222 }
2218 2223
/*
 * Block until every hold on the object has been released.  Registers
 * this thread as a REF_WAIT_SYNC waiter so idm_refcnt_rele() will
 * cv_signal us when the count reaches zero.  No new holds may be taken
 * after this is called (enforced by the ASSERT in idm_refcnt_hold()).
 */
void
idm_refcnt_wait_ref(idm_refcnt_t *refcnt)
{
	mutex_enter(&refcnt->ir_mutex);
	refcnt->ir_waiting = REF_WAIT_SYNC;
	REFCNT_AUDIT(refcnt);
	/* Waits indefinitely; a leaked hold will hang this thread */
	while (refcnt->ir_refcnt != 0)
		cv_wait(&refcnt->ir_cv, &refcnt->ir_mutex);
	mutex_exit(&refcnt->ir_mutex);
}
2229 2234
/*
 * Arrange for cb_func to be invoked on the referenced object once the
 * refcnt drains to zero.  The callback always runs from the global
 * taskq (see idm_refcnt_unref_task), never synchronously from here or
 * from idm_refcnt_rele().
 */
void
idm_refcnt_async_wait_ref(idm_refcnt_t *refcnt, idm_refcnt_cb_t *cb_func)
{
	mutex_enter(&refcnt->ir_mutex);
	refcnt->ir_waiting = REF_WAIT_ASYNC;
	refcnt->ir_cb = cb_func;
	REFCNT_AUDIT(refcnt);
	/*
	 * It's possible we don't have any references. To make things easier
	 * on the caller use a taskq to call the callback instead of
	 * calling it synchronously
	 */
	if (refcnt->ir_refcnt == 0) {
		if (taskq_dispatch(idm.idm_global_taskq,
		    &idm_refcnt_unref_task, refcnt, TQ_SLEEP) == NULL) {
			cmn_err(CE_WARN,
			    "idm_refcnt_async_wait_ref: "
			    "Couldn't dispatch task");
		}
	}
	mutex_exit(&refcnt->ir_mutex);
}
2252 2257
2253 2258 void
2254 2259 idm_refcnt_destroy_unref_obj(idm_refcnt_t *refcnt,
2255 2260 idm_refcnt_cb_t *cb_func)
|
↓ open down ↓ |
87 lines elided |
↑ open up ↑ |
2256 2261 {
2257 2262 mutex_enter(&refcnt->ir_mutex);
2258 2263 if (refcnt->ir_refcnt == 0) {
2259 2264 mutex_exit(&refcnt->ir_mutex);
2260 2265 (*cb_func)(refcnt->ir_referenced_obj);
2261 2266 return;
2262 2267 }
2263 2268 mutex_exit(&refcnt->ir_mutex);
2264 2269 }
2265 2270
2271 +/*
2272 + * used to determine the status of the refcnt.
2273 + *
2274 + * if refcnt is 0 return is 0
2275 + * if refcnt is negative return is -1
2276 + * if refcnt > 0 and no waiters return is 1
2277 + * if refcnt > 0 and waiters return is 2
2278 + */
2279 +int
2280 +idm_refcnt_is_held(idm_refcnt_t *refcnt)
2281 +{
2282 + if (refcnt->ir_refcnt < 0)
2283 + return (-1);
2284 +
2285 + if (refcnt->ir_refcnt == 0)
2286 + return (0);
2287 +
2288 + if (refcnt->ir_waiting == REF_NOWAIT && refcnt->ir_refcnt > 0)
2289 + return (1);
2290 +
2291 + return (2);
2292 +}
2293 +
2266 2294 void
2267 2295 idm_conn_hold(idm_conn_t *ic)
2268 2296 {
2269 2297 idm_refcnt_hold(&ic->ic_refcnt);
2270 2298 }
2271 2299
2272 2300 void
2273 2301 idm_conn_rele(idm_conn_t *ic)
2274 2302 {
2275 2303 idm_refcnt_rele(&ic->ic_refcnt);
2276 2304 }
2277 2305
2278 2306 void
2279 2307 idm_conn_set_target_name(idm_conn_t *ic, char *target_name)
2280 2308 {
2281 2309 (void) strlcpy(ic->ic_target_name, target_name, ISCSI_MAX_NAME_LEN + 1);
2282 2310 }
2283 2311
2284 2312 void
2285 2313 idm_conn_set_initiator_name(idm_conn_t *ic, char *initiator_name)
2286 2314 {
2287 2315 (void) strlcpy(ic->ic_initiator_name, initiator_name,
2288 2316 ISCSI_MAX_NAME_LEN + 1);
2289 2317 }
2290 2318
2291 2319 void
2292 2320 idm_conn_set_isid(idm_conn_t *ic, uint8_t isid[ISCSI_ISID_LEN])
2293 2321 {
2294 2322 (void) snprintf(ic->ic_isid, ISCSI_MAX_ISID_LEN + 1,
2295 2323 "%02x%02x%02x%02x%02x%02x",
2296 2324 isid[0], isid[1], isid[2], isid[3], isid[4], isid[5]);
2297 2325 }
2298 2326
/*
 * _idm_init -- module-load initialization for the iSCSI Data Mover.
 * Sets up global locks and CVs, the global taskq, the watchdog thread,
 * the task ID table, kmem caches, connection/service lists, the native
 * sockets transport and the connection ID pool.  Returns DDI_SUCCESS,
 * or ENOMEM after unwinding everything created so far.
 */
static int
_idm_init(void)
{
	/* Initialize the rwlock for the taskid table */
	rw_init(&idm.idm_taskid_table_lock, NULL, RW_DRIVER, NULL);

	/* Initialize the global mutex and taskq */
	mutex_init(&idm.idm_global_mutex, NULL, MUTEX_DEFAULT, NULL);

	cv_init(&idm.idm_tgt_svc_cv, NULL, CV_DEFAULT, NULL);
	cv_init(&idm.idm_wd_cv, NULL, CV_DEFAULT, NULL);

	/*
	 * The maximum allocation needs to be high here since there can be
	 * many concurrent tasks using the global taskq.
	 */
	idm.idm_global_taskq = taskq_create("idm_global_taskq", 1, minclsyspri,
	    128, 16384, TASKQ_PREPOPULATE);
	if (idm.idm_global_taskq == NULL) {
		/* Unwind everything initialized above, in reverse order */
		cv_destroy(&idm.idm_wd_cv);
		cv_destroy(&idm.idm_tgt_svc_cv);
		mutex_destroy(&idm.idm_global_mutex);
		rw_destroy(&idm.idm_taskid_table_lock);
		return (ENOMEM);
	}

	/* Start watchdog thread */
	idm.idm_wd_thread = thread_create(NULL, 0,
	    idm_wd_thread, NULL, 0, &p0, TS_RUN, minclsyspri);
	if (idm.idm_wd_thread == NULL) {
		/* Couldn't create the watchdog thread */
		taskq_destroy(idm.idm_global_taskq);
		cv_destroy(&idm.idm_wd_cv);
		cv_destroy(&idm.idm_tgt_svc_cv);
		mutex_destroy(&idm.idm_global_mutex);
		rw_destroy(&idm.idm_taskid_table_lock);
		return (ENOMEM);
	}

	/* Pause until the watchdog thread is running */
	mutex_enter(&idm.idm_global_mutex);
	while (!idm.idm_wd_thread_running)
		cv_wait(&idm.idm_wd_cv, &idm.idm_global_mutex);
	mutex_exit(&idm.idm_global_mutex);

	/*
	 * Allocate the task ID table and set "next" to 0.
	 */

	idm.idm_taskid_max = idm_max_taskids;
	idm.idm_taskid_table = (idm_task_t **)
	    kmem_zalloc(idm.idm_taskid_max * sizeof (idm_task_t *), KM_SLEEP);
	idm.idm_taskid_next = 0;

	/* Create the global buffer and task kmem caches */
	idm.idm_buf_cache = kmem_cache_create("idm_buf_cache",
	    sizeof (idm_buf_t), 8, NULL, NULL, NULL, NULL, NULL, KM_SLEEP);

	/*
	 * Note, we're explicitly allocating an additional iSER header-
	 * sized chunk for each of these elements. See idm_task_constructor().
	 */
	idm.idm_task_cache = kmem_cache_create("idm_task_cache",
	    sizeof (idm_task_t) + IDM_TRANSPORT_HEADER_LENGTH, 8,
	    &idm_task_constructor, &idm_task_destructor,
	    NULL, NULL, NULL, KM_SLEEP);

	/* Create the service and connection context lists */
	list_create(&idm.idm_tgt_svc_list, sizeof (idm_svc_t),
	    offsetof(idm_svc_t, is_list_node));
	list_create(&idm.idm_tgt_conn_list, sizeof (idm_conn_t),
	    offsetof(idm_conn_t, ic_list_node));
	list_create(&idm.idm_ini_conn_list, sizeof (idm_conn_t),
	    offsetof(idm_conn_t, ic_list_node));

	/* Initialize the native sockets transport */
	idm_so_init(&idm_transport_list[IDM_TRANSPORT_TYPE_SOCKETS]);

	/* Create connection ID pool */
	(void) idm_idpool_create(&idm.idm_conn_id_pool);

	return (DDI_SUCCESS);
}
2382 2410
/*
 * _idm_fini -- module-unload teardown.  Refuses to unload (EBUSY) while
 * any connection or service context still exists; otherwise stops the
 * watchdog thread and destroys every resource created by _idm_init(),
 * in roughly reverse order of creation.
 */
static int
_idm_fini(void)
{
	/* Still-registered connections or services block unload */
	if (!list_is_empty(&idm.idm_ini_conn_list) ||
	    !list_is_empty(&idm.idm_tgt_conn_list) ||
	    !list_is_empty(&idm.idm_tgt_svc_list)) {
		return (EBUSY);
	}

	/* Ask the watchdog thread to exit, then wait for it below */
	mutex_enter(&idm.idm_global_mutex);
	idm.idm_wd_thread_running = B_FALSE;
	cv_signal(&idm.idm_wd_cv);
	mutex_exit(&idm.idm_global_mutex);

	thread_join(idm.idm_wd_thread_did);

	idm_idpool_destroy(&idm.idm_conn_id_pool);

	/* Close any LDI handles we have open on transport drivers */
	mutex_enter(&idm.idm_global_mutex);
	idm_transport_teardown();
	mutex_exit(&idm.idm_global_mutex);

	/* Teardown the native sockets transport */
	idm_so_fini();

	/* Destroy lists, caches, the taskid table and global sync objects */
	list_destroy(&idm.idm_ini_conn_list);
	list_destroy(&idm.idm_tgt_conn_list);
	list_destroy(&idm.idm_tgt_svc_list);
	kmem_cache_destroy(idm.idm_task_cache);
	kmem_cache_destroy(idm.idm_buf_cache);
	kmem_free(idm.idm_taskid_table,
	    idm.idm_taskid_max * sizeof (idm_task_t *));
	mutex_destroy(&idm.idm_global_mutex);
	cv_destroy(&idm.idm_wd_cv);
	cv_destroy(&idm.idm_tgt_svc_cv);
	rw_destroy(&idm.idm_taskid_table_lock);

	return (0);
}
|
↓ open down ↓ |
147 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX