1 /*
   2  * CDDL HEADER START
   3  *
   4  * The contents of this file are subject to the terms of the
   5  * Common Development and Distribution License (the "License").
   6  * You may not use this file except in compliance with the License.
   7  *
   8  * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
   9  * or http://www.opensolaris.org/os/licensing.
  10  * See the License for the specific language governing permissions
  11  * and limitations under the License.
  12  *
  13  * When distributing Covered Code, include this CDDL HEADER in each
  14  * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
  15  * If applicable, add the following below this CDDL HEADER, with the
  16  * fields enclosed by brackets "[]" replaced with your own identifying
  17  * information: Portions Copyright [yyyy] [name of copyright owner]
  18  *
  19  * CDDL HEADER END
  20  */
  21 
  22 /*
  23  * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
  24  */
  25 /*
  26  * Copyright 2019 Nexenta Systems, Inc.  All rights reserved.
  27  * Copyright (c) 2013 by Delphix. All rights reserved.
  28  * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
  29  */
  30 
  31 #include <sys/conf.h>
  32 #include <sys/file.h>
  33 #include <sys/ddi.h>
  34 #include <sys/sunddi.h>
  35 #include <sys/modctl.h>
  36 #include <sys/scsi/scsi.h>
  37 #include <sys/scsi/generic/persist.h>
  38 #include <sys/scsi/impl/scsi_reset_notify.h>
  39 #include <sys/disp.h>
  40 #include <sys/byteorder.h>
  41 #include <sys/atomic.h>
  42 #include <sys/ethernet.h>
  43 #include <sys/sdt.h>
  44 #include <sys/nvpair.h>
  45 #include <sys/zone.h>
  46 #include <sys/id_space.h>
  47 
  48 #include <sys/stmf.h>
  49 #include <sys/lpif.h>
  50 #include <sys/portif.h>
  51 #include <sys/stmf_ioctl.h>
  52 #include <sys/pppt_ic_if.h>
  53 
  54 #include "stmf_impl.h"
  55 #include "lun_map.h"
  56 #include "stmf_state.h"
  57 #include "stmf_stats.h"
  58 
  59 /*
  60  * Lock order:
  61  * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
  62  */
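     /*
      * Illustrative nesting only (a comment-level sketch, not code that is
      * compiled or called; "ilport" here stands for any stmf_i_local_port_t):
      *
      *     mutex_enter(&stmf_state.stmf_lock);
      *     rw_enter(&ilport->ilport_lock, RW_READER);
      *     ... examine or update port/session state ...
      *     rw_exit(&ilport->ilport_lock);
      *     mutex_exit(&stmf_state.stmf_lock);
      *
      * ilu_task_lock, where needed, is acquired last, inside the other two.
      */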
  63 
  64 static uint64_t stmf_session_counter = 0;
  65 static uint16_t stmf_rtpid_counter = 0;
  66 /* start messages at 1 */
  67 static uint64_t stmf_proxy_msg_id = 1;
  68 #define MSG_ID_TM_BIT   0x8000000000000000
  69 #define ALIGNED_TO_8BYTE_BOUNDARY(i)    (((i) + 7) & ~7)
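     /*
      * ALIGNED_TO_8BYTE_BOUNDARY() rounds its argument up to the next
      * multiple of 8, e.g. ALIGNED_TO_8BYTE_BOUNDARY(13) == 16 and
      * ALIGNED_TO_8BYTE_BOUNDARY(16) == 16.  MSG_ID_TM_BIT is the high bit
      * of the 64-bit proxy message id and appears to distinguish task
      * management messages from ordinary ones.
      */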
  70 
  71 /*
  72  * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up
  73  * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman
  74  * seconds. If it does, we trigger a panic to inform the user of hung I/O
  75  * blocking us for too long.
  76  */
  77 boolean_t stmf_io_deadman_enabled = B_TRUE;
  78 int stmf_io_deadman = 1000;                     /* seconds */
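     /*
      * Both of the above are plain global tunables; if a particular system
      * needs different values they would typically be set from /etc/system,
      * e.g. (illustrative values only, not shipped defaults):
      *
      *     set stmf:stmf_io_deadman_enabled = 0
      *     set stmf:stmf_io_deadman = 120
      */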
  79 
  80 struct stmf_svc_clocks;
  81 
  82 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
  83 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
  84 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
  85         void **result);
  86 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
  87 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
  88 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
  89         cred_t *credp, int *rval);
  90 static int stmf_get_stmf_state(stmf_state_desc_t *std);
  91 static int stmf_set_stmf_state(stmf_state_desc_t *std);
  92 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
  93     char *info);
  94 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
  95 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
  96 
  97 static void stmf_task_audit(stmf_i_scsi_task_t *itask,
  98     task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
  99 
 100 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
 101 static char stmf_ctoi(char c);
 102 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
 103 void stmf_svc_init();
 104 stmf_status_t stmf_svc_fini();
 105 void stmf_svc(void *arg);
 106 static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu);
 107 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
 108 static void stmf_svc_kill_obj_requests(void *obj);
 109 static void stmf_svc_timeout(struct stmf_svc_clocks *);
 110 void stmf_check_freetask();
 111 void stmf_abort_target_reset(scsi_task_t *task);
 112 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
 113                                                         int target_reset);
 114 void stmf_target_reset_poll(struct scsi_task *task);
 115 void stmf_handle_lun_reset(scsi_task_t *task);
 116 void stmf_handle_target_reset(scsi_task_t *task);
 117 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
 118 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
 119     uint32_t *err_ret);
 120 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
 121 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
 122     uint32_t *err_ret);
 123 void stmf_delete_ppd(stmf_pp_data_t *ppd);
 124 void stmf_delete_all_ppds();
 125 void stmf_trace_clear();
 126 void stmf_worker_init();
 127 stmf_status_t stmf_worker_fini();
 128 void stmf_worker_task(void *arg);
 129 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
 130 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
 131     uint32_t type);
 132 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
 133 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
 134 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
 135 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
 136 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
 137 
 138 /* pppt modhandle */
 139 ddi_modhandle_t pppt_mod;
 140 
 141 /* pppt modload imported functions */
 142 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
 143 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
 144 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
 145 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
 146 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
 147 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
 148 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
 149 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
 150 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
 151 stmf_ic_tx_msg_func_t ic_tx_msg;
 152 stmf_ic_msg_free_func_t ic_msg_free;
 153 
 154 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
 155 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
 156 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
 157 
 158 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
 159     stmf_data_buf_t *dbuf);
 160 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
 161     stmf_data_buf_t *dbuf);
 162 
 163 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
 164 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
 165 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
 166 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
 167 static hrtime_t stmf_update_rport_timestamps(hrtime_t *start_tstamp,
 168     hrtime_t *done_tstamp, stmf_i_scsi_task_t *itask);
 169 
 170 static int stmf_irport_compare(const void *void_irport1,
 171     const void *void_irport2);
 172 static void stmf_create_kstat_rport(stmf_i_remote_port_t *irport);
 173 static void stmf_destroy_kstat_rport(stmf_i_remote_port_t *irport);
 174 static int stmf_kstat_rport_update(kstat_t *ksp, int rw);
 175 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
 176 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
 177 static stmf_i_remote_port_t *stmf_irport_register(
 178     scsi_devid_desc_t *rport_devid);
 179 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
 180     scsi_devid_desc_t *rport_devid);
 181 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
 182 
 183 extern struct mod_ops mod_driverops;
 184 
 185 /* =====[ Tunables ]===== */
 186 /* Internal tracing */
 187 volatile int    stmf_trace_on = 0;
 188 volatile int    stmf_trace_buf_size = (1 * 1024 * 1024);
 189 /*
 190  * The default task timeout is 75 seconds so that the host (initiator)
 191  * times out first; most host-side timeouts are 60 seconds.
 192  */
 193 volatile int    stmf_default_task_timeout = 75;
 194 /*
 195  * Setting this to one means you are responsible for loading the config
 196  * and for keeping things in sync with the persistent database.
 197  */
 198 volatile int    stmf_allow_modunload = 0;
 199 
 200 volatile int stmf_nworkers = 512;
 201 volatile int stmf_worker_warn = 0;
 202 
 203 /* === [ Debugging and fault injection ] === */
 204 #ifdef  DEBUG
 205 volatile int stmf_drop_task_counter = 0;
 206 volatile int stmf_drop_buf_counter = 0;
 207 
 208 #endif
 209 
 210 stmf_state_t            stmf_state;
 211 static stmf_lu_t        *dlun0;
 212 
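     /*
      * Nibble lookup tables: for a 4-bit value i, stmf_first_zero[i] is the
      * index of the lowest clear bit in i (0xff when all four bits are set)
      * and stmf_first_one[i] is the index of the lowest set bit (0xff when
      * i == 0), e.g. stmf_first_zero[0x7] == 3 and stmf_first_one[0x8] == 3.
      */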
 213 static uint8_t stmf_first_zero[] =
 214         { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
 215 static uint8_t stmf_first_one[] =
 216         { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
 217 
 218 static kmutex_t trace_buf_lock;
 219 static int      trace_buf_size;
 220 static int      trace_buf_curndx;
 221 caddr_t stmf_trace_buf;
 222 
 223 static enum {
 224         STMF_WORKERS_DISABLED = 0,
 225         STMF_WORKERS_ENABLING,
 226         STMF_WORKERS_ENABLED
 227 } stmf_workers_state = STMF_WORKERS_DISABLED;
 228 static kmutex_t stmf_worker_sel_mx;
 229 volatile uint32_t stmf_nworkers_cur = 0; /* # of workers currently running */
 230 static int stmf_worker_sel_counter = 0;
 231 static uint32_t stmf_cur_ntasks = 0;
 232 static clock_t stmf_wm_next = 0;
 233 static int stmf_nworkers_accepting_cmds;
 234 static stmf_worker_t *stmf_workers = NULL;
 235 static clock_t stmf_worker_scale_down_timer = 0;
 236 static int stmf_worker_scale_down_qd = 0;
 237 
 238 static struct cb_ops stmf_cb_ops = {
 239         stmf_open,                      /* open */
 240         stmf_close,                     /* close */
 241         nodev,                          /* strategy */
 242         nodev,                          /* print */
 243         nodev,                          /* dump */
 244         nodev,                          /* read */
 245         nodev,                          /* write */
 246         stmf_ioctl,                     /* ioctl */
 247         nodev,                          /* devmap */
 248         nodev,                          /* mmap */
 249         nodev,                          /* segmap */
 250         nochpoll,                       /* chpoll */
 251         ddi_prop_op,                    /* cb_prop_op */
 252         0,                              /* streamtab */
 253         D_NEW | D_MP,                   /* cb_flag */
 254         CB_REV,                         /* rev */
 255         nodev,                          /* aread */
 256         nodev                           /* awrite */
 257 };
 258 
 259 static struct dev_ops stmf_ops = {
 260         DEVO_REV,
 261         0,
 262         stmf_getinfo,
 263         nulldev,                /* identify */
 264         nulldev,                /* probe */
 265         stmf_attach,
 266         stmf_detach,
 267         nodev,                  /* reset */
 268         &stmf_cb_ops,
 269         NULL,                   /* bus_ops */
 270         NULL                    /* power */
 271 };
 272 
 273 
 274 #define STMF_MODULE_NAME        "stmf"
 275 
 276 #ifdef  DEBUG
 277 #define STMF_NAME               "COMSTAR STMF D " __DATE__ " " __TIME__
 278 #else
 279 #define STMF_NAME               "COMSTAR STMF"
 280 #endif
 281 
 282 static struct modldrv modldrv = {
 283         &mod_driverops,
 284         STMF_NAME,
 285         &stmf_ops
 286 };
 287 
 288 static struct modlinkage modlinkage = {
 289         MODREV_1,
 290         &modldrv,
 291         NULL
 292 };
 293 
 294 int
 295 _init(void)
 296 {
 297         int ret;
 298 
 299         ret = mod_install(&modlinkage);
 300         if (ret)
 301                 return (ret);
 302         stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
 303         trace_buf_size = stmf_trace_buf_size;
 304         trace_buf_curndx = 0;
 305         mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
 306         mutex_init(&stmf_worker_sel_mx, NULL, MUTEX_ADAPTIVE, 0);
 307         bzero(&stmf_state, sizeof (stmf_state_t));
 308         /* STMF service is off by default */
 309         stmf_state.stmf_service_running = 0;
 310         /* default lu/lport states are online */
 311         stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
 312         stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
 313         mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
 314         cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
 315         stmf_session_counter = (uint64_t)ddi_get_lbolt();
 316         avl_create(&stmf_state.stmf_irportlist,
 317             stmf_irport_compare, sizeof (stmf_i_remote_port_t),
 318             offsetof(stmf_i_remote_port_t, irport_ln));
 319         stmf_state.stmf_ilport_inst_space =
 320             id_space_create("lport-instances", 0, MAX_ILPORT);
 321         stmf_state.stmf_irport_inst_space =
 322             id_space_create("rport-instances", 0, MAX_IRPORT);
 323         stmf_view_init();
 324         stmf_svc_init();
 325         stmf_dlun_init();
 326         return (ret);
 327 }
 328 
 329 int
 330 _fini(void)
 331 {
 332         int ret;
 333         stmf_i_remote_port_t    *irport;
 334         void                    *avl_dest_cookie = NULL;
 335 
 336         if (stmf_state.stmf_service_running)
 337                 return (EBUSY);
 338         if ((!stmf_allow_modunload) &&
 339             (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
 340                 return (EBUSY);
 341         }
 342         if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
 343                 return (EBUSY);
 344         }
 345         if (stmf_dlun_fini() != STMF_SUCCESS)
 346                 return (EBUSY);
 347         if (stmf_worker_fini() != STMF_SUCCESS) {
 348                 stmf_dlun_init();
 349                 return (EBUSY);
 350         }
 351         if (stmf_svc_fini() != STMF_SUCCESS) {
 352                 stmf_dlun_init();
 353                 stmf_worker_init();
 354                 return (EBUSY);
 355         }
 356 
 357         ret = mod_remove(&modlinkage);
 358         if (ret) {
 359                 stmf_svc_init();
 360                 stmf_dlun_init();
 361                 stmf_worker_init();
 362                 return (ret);
 363         }
 364 
 365         stmf_view_clear_config();
 366 
 367         while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
 368             &avl_dest_cookie)) != NULL)
 369                 stmf_irport_destroy(irport);
 370         avl_destroy(&stmf_state.stmf_irportlist);
 371         id_space_destroy(stmf_state.stmf_ilport_inst_space);
 372         id_space_destroy(stmf_state.stmf_irport_inst_space);
 373 
 374         kmem_free(stmf_trace_buf, stmf_trace_buf_size);
 375         mutex_destroy(&trace_buf_lock);
 376         mutex_destroy(&stmf_state.stmf_lock);
 377         mutex_destroy(&stmf_worker_sel_mx);
 378         cv_destroy(&stmf_state.stmf_cv);
 379         return (ret);
 380 }
 381 
 382 int
 383 _info(struct modinfo *modinfop)
 384 {
 385         return (mod_info(&modlinkage, modinfop));
 386 }
 387 
 388 /* ARGSUSED */
 389 static int
 390 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
 391 {
 392         switch (cmd) {
 393         case DDI_INFO_DEVT2DEVINFO:
 394                 *result = stmf_state.stmf_dip;
 395                 break;
 396         case DDI_INFO_DEVT2INSTANCE:
 397                 *result =
 398                     (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
 399                 break;
 400         default:
 401                 return (DDI_FAILURE);
 402         }
 403 
 404         return (DDI_SUCCESS);
 405 }
 406 
 407 static int
 408 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
 409 {
 410         switch (cmd) {
 411         case DDI_ATTACH:
 412                 stmf_state.stmf_dip = dip;
 413 
 414                 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
 415                     DDI_NT_STMF, 0) != DDI_SUCCESS) {
 416                         break;
 417                 }
 418                 ddi_report_dev(dip);
 419                 return (DDI_SUCCESS);
 420         }
 421 
 422         return (DDI_FAILURE);
 423 }
 424 
 425 static int
 426 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
 427 {
 428         switch (cmd) {
 429         case DDI_DETACH:
 430                 ddi_remove_minor_node(dip, 0);
 431                 return (DDI_SUCCESS);
 432         }
 433 
 434         return (DDI_FAILURE);
 435 }
 436 
 437 /* ARGSUSED */
 438 static int
 439 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
 440 {
 441         mutex_enter(&stmf_state.stmf_lock);
 442         if (stmf_state.stmf_exclusive_open) {
 443                 mutex_exit(&stmf_state.stmf_lock);
 444                 return (EBUSY);
 445         }
 446         if (flag & FEXCL) {
 447                 if (stmf_state.stmf_opened) {
 448                         mutex_exit(&stmf_state.stmf_lock);
 449                         return (EBUSY);
 450                 }
 451                 stmf_state.stmf_exclusive_open = 1;
 452         }
 453         stmf_state.stmf_opened = 1;
 454         mutex_exit(&stmf_state.stmf_lock);
 455         return (0);
 456 }
 457 
 458 /* ARGSUSED */
 459 static int
 460 stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
 461 {
 462         mutex_enter(&stmf_state.stmf_lock);
 463         stmf_state.stmf_opened = 0;
 464         if (stmf_state.stmf_exclusive_open &&
 465             (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
 466                 stmf_state.stmf_config_state = STMF_CONFIG_NONE;
 467                 stmf_delete_all_ppds();
 468                 stmf_view_clear_config();
 469                 stmf_view_init();
 470         }
 471         stmf_state.stmf_exclusive_open = 0;
 472         mutex_exit(&stmf_state.stmf_lock);
 473         return (0);
 474 }
 475 
 476 int
 477 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
 478     void **ibuf, void **obuf)
 479 {
 480         int ret;
 481 
 482         *ibuf = NULL;
 483         *obuf = NULL;
 484         *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
 485 
 486         ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
 487         if (ret)
 488                 return (EFAULT);
 489         if ((*iocd)->stmf_version != STMF_VERSION_1) {
 490                 ret = EINVAL;
 491                 goto copyin_iocdata_done;
 492         }
 493         if ((*iocd)->stmf_ibuf_size) {
 494                 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
 495                 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
 496                     *ibuf, (*iocd)->stmf_ibuf_size, mode);
 497         }
 498         if ((*iocd)->stmf_obuf_size)
 499                 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
 500 
 501         if (ret == 0)
 502                 return (0);
 503         ret = EFAULT;
 504 copyin_iocdata_done:;
 505         if (*obuf) {
 506                 kmem_free(*obuf, (*iocd)->stmf_obuf_size);
 507                 *obuf = NULL;
 508         }
 509         if (*ibuf) {
 510                 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
 511                 *ibuf = NULL;
 512         }
 513         kmem_free(*iocd, sizeof (stmf_iocdata_t));
 514         return (ret);
 515 }
 516 
 517 int
 518 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
 519 {
 520         int ret;
 521 
 522         if (iocd->stmf_obuf_size) {
 523                 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
 524                     iocd->stmf_obuf_size, mode);
 525                 if (ret)
 526                         return (EFAULT);
 527         }
 528         ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
 529         if (ret)
 530                 return (EFAULT);
 531         return (0);
 532 }
 533 
 534 /* ARGSUSED */
 535 static int
 536 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
 537     cred_t *credp, int *rval)
 538 {
 539         stmf_iocdata_t *iocd;
 540         void *ibuf = NULL, *obuf = NULL;
 541         slist_lu_t *luid_list;
 542         slist_target_port_t *lportid_list;
 543         stmf_i_lu_t *ilu;
 544         stmf_i_local_port_t *ilport;
 545         stmf_i_scsi_session_t *iss;
 546         slist_scsi_session_t *iss_list;
 547         sioc_lu_props_t *lup;
 548         sioc_target_port_props_t *lportp;
 549         stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
 550         uint64_t *ppi_token = NULL;
 551         uint8_t *p_id, *id;
 552         stmf_state_desc_t *std;
 553         stmf_status_t ctl_ret;
 554         stmf_state_change_info_t ssi;
 555         int ret = 0;
 556         uint32_t n;
 557         int i;
 558         stmf_group_op_data_t *grp_entry;
 559         stmf_group_name_t *grpname;
 560         stmf_view_op_entry_t *ve;
 561         stmf_id_type_t idtype;
 562         stmf_id_data_t *id_entry;
 563         stmf_id_list_t  *id_list;
 564         stmf_view_entry_t *view_entry;
 565         stmf_set_props_t *stmf_set_props;
 566         uint32_t        veid;
 567         if ((cmd & 0xff000000) != STMF_IOCTL) {
 568                 return (ENOTTY);
 569         }
 570 
 571         if (drv_priv(credp) != 0) {
 572                 return (EPERM);
 573         }
 574 
 575         ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
 576         if (ret)
 577                 return (ret);
 578         iocd->stmf_error = 0;
 579 
 580         switch (cmd) {
 581         case STMF_IOCTL_LU_LIST:
 582                 /* retrieves both registered/unregistered */
 583                 mutex_enter(&stmf_state.stmf_lock);
 584                 id_list = &stmf_state.stmf_luid_list;
 585                 n = min(id_list->id_count,
 586                     (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
 587                 iocd->stmf_obuf_max_nentries = id_list->id_count;
 588                 luid_list = (slist_lu_t *)obuf;
 589                 id_entry = id_list->idl_head;
 590                 for (i = 0; i < n; i++) {
 591                         bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
 592                         id_entry = id_entry->id_next;
 593                 }
 594 
 595                 n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
 596                 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
 597                         id = (uint8_t *)ilu->ilu_lu->lu_id;
 598                         if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
 599                                 iocd->stmf_obuf_max_nentries++;
 600                                 if (i < n) {
 601                                         bcopy(id + 4, luid_list[i].lu_guid,
 602                                             sizeof (slist_lu_t));
 603                                         i++;
 604                                 }
 605                         }
 606                 }
 607                 iocd->stmf_obuf_nentries = i;
 608                 mutex_exit(&stmf_state.stmf_lock);
 609                 break;
 610 
 611         case STMF_IOCTL_REG_LU_LIST:
 612                 mutex_enter(&stmf_state.stmf_lock);
 613                 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
 614                 n = min(stmf_state.stmf_nlus,
 615                     (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
 616                 iocd->stmf_obuf_nentries = n;
 617                 ilu = stmf_state.stmf_ilulist;
 618                 luid_list = (slist_lu_t *)obuf;
 619                 for (i = 0; i < n; i++) {
 620                         uint8_t *id;
 621                         id = (uint8_t *)ilu->ilu_lu->lu_id;
 622                         bcopy(id + 4, luid_list[i].lu_guid, 16);
 623                         ilu = ilu->ilu_next;
 624                 }
 625                 mutex_exit(&stmf_state.stmf_lock);
 626                 break;
 627 
 628         case STMF_IOCTL_VE_LU_LIST:
 629                 mutex_enter(&stmf_state.stmf_lock);
 630                 id_list = &stmf_state.stmf_luid_list;
 631                 n = min(id_list->id_count,
 632                     (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
 633                 iocd->stmf_obuf_max_nentries = id_list->id_count;
 634                 iocd->stmf_obuf_nentries = n;
 635                 luid_list = (slist_lu_t *)obuf;
 636                 id_entry = id_list->idl_head;
 637                 for (i = 0; i < n; i++) {
 638                         bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
 639                         id_entry = id_entry->id_next;
 640                 }
 641                 mutex_exit(&stmf_state.stmf_lock);
 642                 break;
 643 
 644         case STMF_IOCTL_TARGET_PORT_LIST:
 645                 mutex_enter(&stmf_state.stmf_lock);
 646                 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
 647                 n = min(stmf_state.stmf_nlports,
 648                     (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
 649                 iocd->stmf_obuf_nentries = n;
 650                 ilport = stmf_state.stmf_ilportlist;
 651                 lportid_list = (slist_target_port_t *)obuf;
 652                 for (i = 0; i < n; i++) {
 653                         uint8_t *id;
 654                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 655                         bcopy(id, lportid_list[i].target, id[3] + 4);
 656                         ilport = ilport->ilport_next;
 657                 }
 658                 mutex_exit(&stmf_state.stmf_lock);
 659                 break;
 660 
 661         case STMF_IOCTL_SESSION_LIST:
 662                 p_id = (uint8_t *)ibuf;
 663                 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
 664                     (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
 665                         ret = EINVAL;
 666                         break;
 667                 }
 668                 mutex_enter(&stmf_state.stmf_lock);
 669                 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
 670                     ilport->ilport_next) {
 671                         uint8_t *id;
 672                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 673                         if ((p_id[3] == id[3]) &&
 674                             (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
 675                                 break;
 676                         }
 677                 }
 678                 if (ilport == NULL) {
 679                         mutex_exit(&stmf_state.stmf_lock);
 680                         ret = ENOENT;
 681                         break;
 682                 }
 683                 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
 684                 n = min(ilport->ilport_nsessions,
 685                     (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
 686                 iocd->stmf_obuf_nentries = n;
 687                 iss = ilport->ilport_ss_list;
 688                 iss_list = (slist_scsi_session_t *)obuf;
 689                 for (i = 0; i < n; i++) {
 690                         uint8_t *id;
 691                         id = (uint8_t *)iss->iss_ss->ss_rport_id;
 692                         bcopy(id, iss_list[i].initiator, id[3] + 4);
 693                         iss_list[i].creation_time = (uint32_t)
 694                             iss->iss_creation_time;
 695                         if (iss->iss_ss->ss_rport_alias) {
 696                                 (void) strncpy(iss_list[i].alias,
 697                                     iss->iss_ss->ss_rport_alias, 255);
 698                                 iss_list[i].alias[255] = 0;
 699                         } else {
 700                                 iss_list[i].alias[0] = 0;
 701                         }
 702                         iss = iss->iss_next;
 703                 }
 704                 mutex_exit(&stmf_state.stmf_lock);
 705                 break;
 706 
 707         case STMF_IOCTL_GET_LU_PROPERTIES:
 708                 p_id = (uint8_t *)ibuf;
 709                 if ((iocd->stmf_ibuf_size < 16) ||
 710                     (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
 711                     (p_id[0] == 0)) {
 712                         ret = EINVAL;
 713                         break;
 714                 }
 715                 mutex_enter(&stmf_state.stmf_lock);
 716                 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
 717                         if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
 718                                 break;
 719                 }
 720                 if (ilu == NULL) {
 721                         mutex_exit(&stmf_state.stmf_lock);
 722                         ret = ENOENT;
 723                         break;
 724                 }
 725                 lup = (sioc_lu_props_t *)obuf;
 726                 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
 727                 lup->lu_state = ilu->ilu_state & 0x0f;
 728                 lup->lu_present = 1; /* XXX */
 729                 (void) strncpy(lup->lu_provider_name,
 730                     ilu->ilu_lu->lu_lp->lp_name, 255);
 731                 lup->lu_provider_name[254] = 0;
 732                 if (ilu->ilu_lu->lu_alias) {
 733                         (void) strncpy(lup->lu_alias,
 734                             ilu->ilu_lu->lu_alias, 255);
 735                         lup->lu_alias[255] = 0;
 736                 } else {
 737                         lup->lu_alias[0] = 0;
 738                 }
 739                 mutex_exit(&stmf_state.stmf_lock);
 740                 break;
 741 
 742         case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
 743                 p_id = (uint8_t *)ibuf;
 744                 if ((p_id == NULL) ||
 745                     (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
 746                     (iocd->stmf_obuf_size <
 747                     sizeof (sioc_target_port_props_t))) {
 748                         ret = EINVAL;
 749                         break;
 750                 }
 751                 mutex_enter(&stmf_state.stmf_lock);
 752                 for (ilport = stmf_state.stmf_ilportlist; ilport;
 753                     ilport = ilport->ilport_next) {
 754                         uint8_t *id;
 755                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 756                         if ((p_id[3] == id[3]) &&
 757                             (bcmp(p_id+4, id+4, id[3]) == 0))
 758                                 break;
 759                 }
 760                 if (ilport == NULL) {
 761                         mutex_exit(&stmf_state.stmf_lock);
 762                         ret = ENOENT;
 763                         break;
 764                 }
 765                 lportp = (sioc_target_port_props_t *)obuf;
 766                 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
 767                     ilport->ilport_lport->lport_id->ident_length + 4);
 768                 lportp->tgt_state = ilport->ilport_state & 0x0f;
 769                 lportp->tgt_present = 1; /* XXX */
 770                 (void) strncpy(lportp->tgt_provider_name,
 771                     ilport->ilport_lport->lport_pp->pp_name, 255);
 772                 lportp->tgt_provider_name[254] = 0;
 773                 if (ilport->ilport_lport->lport_alias) {
 774                         (void) strncpy(lportp->tgt_alias,
 775                             ilport->ilport_lport->lport_alias, 255);
 776                         lportp->tgt_alias[255] = 0;
 777                 } else {
 778                         lportp->tgt_alias[0] = 0;
 779                 }
 780                 mutex_exit(&stmf_state.stmf_lock);
 781                 break;
 782 
 783         case STMF_IOCTL_SET_STMF_STATE:
 784                 if ((ibuf == NULL) ||
 785                     (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
 786                         ret = EINVAL;
 787                         break;
 788                 }
 789                 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
 790                 break;
 791 
 792         case STMF_IOCTL_GET_STMF_STATE:
 793                 if ((obuf == NULL) ||
 794                     (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
 795                         ret = EINVAL;
 796                         break;
 797                 }
 798                 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
 799                 break;
 800 
 801         case STMF_IOCTL_SET_ALUA_STATE:
 802                 if ((ibuf == NULL) ||
 803                     (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
 804                         ret = EINVAL;
 805                         break;
 806                 }
 807                 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
 808                 break;
 809 
 810         case STMF_IOCTL_GET_ALUA_STATE:
 811                 if ((obuf == NULL) ||
 812                     (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
 813                         ret = EINVAL;
 814                         break;
 815                 }
 816                 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
 817                 break;
 818 
 819         case STMF_IOCTL_SET_LU_STATE:
 820                 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
 821                 ssi.st_additional_info = NULL;
 822                 std = (stmf_state_desc_t *)ibuf;
 823                 if ((ibuf == NULL) ||
 824                     (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
 825                         ret = EINVAL;
 826                         break;
 827                 }
 828                 p_id = std->ident;
 829                 mutex_enter(&stmf_state.stmf_lock);
 830                 if (stmf_state.stmf_inventory_locked) {
 831                         mutex_exit(&stmf_state.stmf_lock);
 832                         ret = EBUSY;
 833                         break;
 834                 }
 835                 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
 836                         if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
 837                                 break;
 838                 }
 839                 if (ilu == NULL) {
 840                         mutex_exit(&stmf_state.stmf_lock);
 841                         ret = ENOENT;
 842                         break;
 843                 }
 844                 stmf_state.stmf_inventory_locked = 1;
 845                 mutex_exit(&stmf_state.stmf_lock);
 846                 cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
 847                     STMF_CMD_LU_OFFLINE;
 848                 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
 849                 if (ctl_ret == STMF_ALREADY)
 850                         ret = 0;
 851                 else if (ctl_ret == STMF_BUSY)
 852                         ret = EBUSY;
 853                 else if (ctl_ret != STMF_SUCCESS)
 854                         ret = EIO;
 855                 mutex_enter(&stmf_state.stmf_lock);
 856                 stmf_state.stmf_inventory_locked = 0;
 857                 mutex_exit(&stmf_state.stmf_lock);
 858                 break;
 859 
 860         case STMF_IOCTL_SET_STMF_PROPS:
 861                 if ((ibuf == NULL) ||
 862                     (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
 863                         ret = EINVAL;
 864                         break;
 865                 }
 866                 stmf_set_props = (stmf_set_props_t *)ibuf;
 867                 mutex_enter(&stmf_state.stmf_lock);
 868                 if ((stmf_set_props->default_lu_state_value ==
 869                     STMF_STATE_OFFLINE) ||
 870                     (stmf_set_props->default_lu_state_value ==
 871                     STMF_STATE_ONLINE)) {
 872                         stmf_state.stmf_default_lu_state =
 873                             stmf_set_props->default_lu_state_value;
 874                 }
 875                 if ((stmf_set_props->default_target_state_value ==
 876                     STMF_STATE_OFFLINE) ||
 877                     (stmf_set_props->default_target_state_value ==
 878                     STMF_STATE_ONLINE)) {
 879                         stmf_state.stmf_default_lport_state =
 880                             stmf_set_props->default_target_state_value;
 881                 }
 882 
 883                 mutex_exit(&stmf_state.stmf_lock);
 884                 break;
 885 
 886         case STMF_IOCTL_SET_TARGET_PORT_STATE:
 887                 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
 888                 ssi.st_additional_info = NULL;
 889                 std = (stmf_state_desc_t *)ibuf;
 890                 if ((ibuf == NULL) ||
 891                     (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
 892                         ret = EINVAL;
 893                         break;
 894                 }
 895                 p_id = std->ident;
 896                 mutex_enter(&stmf_state.stmf_lock);
 897                 if (stmf_state.stmf_inventory_locked) {
 898                         mutex_exit(&stmf_state.stmf_lock);
 899                         ret = EBUSY;
 900                         break;
 901                 }
 902                 for (ilport = stmf_state.stmf_ilportlist; ilport;
 903                     ilport = ilport->ilport_next) {
 904                         uint8_t *id;
 905                         id = (uint8_t *)ilport->ilport_lport->lport_id;
 906                         if ((id[3] == p_id[3]) &&
 907                             (bcmp(id+4, p_id+4, id[3]) == 0)) {
 908                                 break;
 909                         }
 910                 }
 911                 if (ilport == NULL) {
 912                         mutex_exit(&stmf_state.stmf_lock);
 913                         ret = ENOENT;
 914                         break;
 915                 }
 916                 stmf_state.stmf_inventory_locked = 1;
 917                 mutex_exit(&stmf_state.stmf_lock);
 918                 cmd = (std->state == STMF_STATE_ONLINE) ?
 919                     STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
 920                 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
 921                 if (ctl_ret == STMF_ALREADY)
 922                         ret = 0;
 923                 else if (ctl_ret == STMF_BUSY)
 924                         ret = EBUSY;
 925                 else if (ctl_ret != STMF_SUCCESS)
 926                         ret = EIO;
 927                 mutex_enter(&stmf_state.stmf_lock);
 928                 stmf_state.stmf_inventory_locked = 0;
 929                 mutex_exit(&stmf_state.stmf_lock);
 930                 break;
 931 
 932         case STMF_IOCTL_ADD_HG_ENTRY:
 933                 idtype = STMF_ID_TYPE_HOST;
 934                 /* FALLTHROUGH */
 935         case STMF_IOCTL_ADD_TG_ENTRY:
 936                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
 937                         ret = EACCES;
 938                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
 939                         break;
 940                 }
 941                 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
 942                         idtype = STMF_ID_TYPE_TARGET;
 943                 }
 944                 grp_entry = (stmf_group_op_data_t *)ibuf;
 945                 if ((ibuf == NULL) ||
 946                     (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
 947                         ret = EINVAL;
 948                         break;
 949                 }
 950                 if (grp_entry->group.name[0] == '*') {
 951                         ret = EINVAL;
 952                         break; /* not allowed */
 953                 }
 954                 mutex_enter(&stmf_state.stmf_lock);
 955                 ret = stmf_add_group_member(grp_entry->group.name,
 956                     grp_entry->group.name_size,
 957                     grp_entry->ident + 4,
 958                     grp_entry->ident[3],
 959                     idtype,
 960                     &iocd->stmf_error);
 961                 mutex_exit(&stmf_state.stmf_lock);
 962                 break;
 963         case STMF_IOCTL_REMOVE_HG_ENTRY:
 964                 idtype = STMF_ID_TYPE_HOST;
 965                 /* FALLTHROUGH */
 966         case STMF_IOCTL_REMOVE_TG_ENTRY:
 967                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
 968                         ret = EACCES;
 969                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
 970                         break;
 971                 }
 972                 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
 973                         idtype = STMF_ID_TYPE_TARGET;
 974                 }
 975                 grp_entry = (stmf_group_op_data_t *)ibuf;
 976                 if ((ibuf == NULL) ||
 977                     (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
 978                         ret = EINVAL;
 979                         break;
 980                 }
 981                 if (grp_entry->group.name[0] == '*') {
 982                         ret = EINVAL;
 983                         break; /* not allowed */
 984                 }
 985                 mutex_enter(&stmf_state.stmf_lock);
 986                 ret = stmf_remove_group_member(grp_entry->group.name,
 987                     grp_entry->group.name_size,
 988                     grp_entry->ident + 4,
 989                     grp_entry->ident[3],
 990                     idtype,
 991                     &iocd->stmf_error);
 992                 mutex_exit(&stmf_state.stmf_lock);
 993                 break;
 994         case STMF_IOCTL_CREATE_HOST_GROUP:
 995                 idtype = STMF_ID_TYPE_HOST_GROUP;
 996                 /* FALLTHROUGH */
 997         case STMF_IOCTL_CREATE_TARGET_GROUP:
 998                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
 999                         ret = EACCES;
1000                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1001                         break;
1002                 }
1003                 grpname = (stmf_group_name_t *)ibuf;
1004 
1005                 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
1006                         idtype = STMF_ID_TYPE_TARGET_GROUP;
1007                 if ((ibuf == NULL) ||
1008                     (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1009                         ret = EINVAL;
1010                         break;
1011                 }
1012                 if (grpname->name[0] == '*') {
1013                         ret = EINVAL;
1014                         break; /* not allowed */
1015                 }
1016                 mutex_enter(&stmf_state.stmf_lock);
1017                 ret = stmf_add_group(grpname->name,
1018                     grpname->name_size, idtype, &iocd->stmf_error);
1019                 mutex_exit(&stmf_state.stmf_lock);
1020                 break;
1021         case STMF_IOCTL_REMOVE_HOST_GROUP:
1022                 idtype = STMF_ID_TYPE_HOST_GROUP;
1023                 /* FALLTHROUGH */
1024         case STMF_IOCTL_REMOVE_TARGET_GROUP:
1025                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1026                         ret = EACCES;
1027                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1028                         break;
1029                 }
1030                 grpname = (stmf_group_name_t *)ibuf;
1031                 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1032                         idtype = STMF_ID_TYPE_TARGET_GROUP;
1033                 if ((ibuf == NULL) ||
1034                     (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1035                         ret = EINVAL;
1036                         break;
1037                 }
1038                 if (grpname->name[0] == '*') {
1039                         ret = EINVAL;
1040                         break; /* not allowed */
1041                 }
1042                 mutex_enter(&stmf_state.stmf_lock);
1043                 ret = stmf_remove_group(grpname->name,
1044                     grpname->name_size, idtype, &iocd->stmf_error);
1045                 mutex_exit(&stmf_state.stmf_lock);
1046                 break;
1047         case STMF_IOCTL_VALIDATE_VIEW:
1048         case STMF_IOCTL_ADD_VIEW_ENTRY:
1049                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1050                         ret = EACCES;
1051                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1052                         break;
1053                 }
1054                 ve = (stmf_view_op_entry_t *)ibuf;
1055                 if ((ibuf == NULL) ||
1056                     (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1057                         ret = EINVAL;
1058                         break;
1059                 }
1060                 if (!ve->ve_lu_number_valid)
1061                         ve->ve_lu_nbr[2] = 0xFF;
1062                 if (ve->ve_all_hosts) {
1063                         ve->ve_host_group.name[0] = '*';
1064                         ve->ve_host_group.name_size = 1;
1065                 }
1066                 if (ve->ve_all_targets) {
1067                         ve->ve_target_group.name[0] = '*';
1068                         ve->ve_target_group.name_size = 1;
1069                 }
1070                 if (ve->ve_ndx_valid)
1071                         veid = ve->ve_ndx;
1072                 else
1073                         veid = 0xffffffff;
1074                 mutex_enter(&stmf_state.stmf_lock);
1075                 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1076                         ret = stmf_add_ve(ve->ve_host_group.name,
1077                             ve->ve_host_group.name_size,
1078                             ve->ve_target_group.name,
1079                             ve->ve_target_group.name_size,
1080                             ve->ve_guid,
1081                             &veid,
1082                             ve->ve_lu_nbr,
1083                             &iocd->stmf_error);
1084                 } else {  /* STMF_IOCTL_VALIDATE_VIEW */
1085                         ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1086                             ve->ve_host_group.name_size,
1087                             ve->ve_target_group.name,
1088                             ve->ve_target_group.name_size,
1089                             ve->ve_lu_nbr,
1090                             &iocd->stmf_error);
1091                 }
1092                 mutex_exit(&stmf_state.stmf_lock);
1093                 if (ret == 0 &&
1094                     (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1095                     iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1096                         stmf_view_op_entry_t *ve_ret =
1097                             (stmf_view_op_entry_t *)obuf;
1098                         iocd->stmf_obuf_nentries = 1;
1099                         iocd->stmf_obuf_max_nentries = 1;
1100                         if (!ve->ve_ndx_valid) {
1101                                 ve_ret->ve_ndx = veid;
1102                                 ve_ret->ve_ndx_valid = 1;
1103                         }
1104                         if (!ve->ve_lu_number_valid) {
1105                                 ve_ret->ve_lu_number_valid = 1;
1106                                 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1107                         }
1108                 }
1109                 break;
1110         case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1111                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1112                         ret = EACCES;
1113                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1114                         break;
1115                 }
1116                 ve = (stmf_view_op_entry_t *)ibuf;
1117                 if ((ibuf == NULL) ||
1118                     (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1119                         ret = EINVAL;
1120                         break;
1121                 }
1122                 if (!ve->ve_ndx_valid) {
1123                         ret = EINVAL;
1124                         break;
1125                 }
1126                 mutex_enter(&stmf_state.stmf_lock);
1127                 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1128                     &iocd->stmf_error);
1129                 mutex_exit(&stmf_state.stmf_lock);
1130                 break;
1131         case STMF_IOCTL_GET_HG_LIST:
1132                 id_list = &stmf_state.stmf_hg_list;
1133                 /* FALLTHROUGH */
1134         case STMF_IOCTL_GET_TG_LIST:
1135                 if (cmd == STMF_IOCTL_GET_TG_LIST)
1136                         id_list = &stmf_state.stmf_tg_list;
1137                 mutex_enter(&stmf_state.stmf_lock);
1138                 iocd->stmf_obuf_max_nentries = id_list->id_count;
1139                 n = min(id_list->id_count,
1140                     (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1141                 iocd->stmf_obuf_nentries = n;
1142                 id_entry = id_list->idl_head;
1143                 grpname = (stmf_group_name_t *)obuf;
1144                 for (i = 0; i < n; i++) {
1145                         if (id_entry->id_data[0] == '*') {
1146                                 if (iocd->stmf_obuf_nentries > 0) {
1147                                         iocd->stmf_obuf_nentries--;
1148                                 }
1149                                 id_entry = id_entry->id_next;
1150                                 continue;
1151                         }
1152                         grpname->name_size = id_entry->id_data_size;
1153                         bcopy(id_entry->id_data, grpname->name,
1154                             id_entry->id_data_size);
1155                         grpname++;
1156                         id_entry = id_entry->id_next;
1157                 }
1158                 mutex_exit(&stmf_state.stmf_lock);
1159                 break;
1160         case STMF_IOCTL_GET_HG_ENTRIES:
1161                 id_list = &stmf_state.stmf_hg_list;
1162                 /* FALLTHROUGH */
1163         case STMF_IOCTL_GET_TG_ENTRIES:
1164                 grpname = (stmf_group_name_t *)ibuf;
1165                 if ((ibuf == NULL) ||
1166                     (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1167                         ret = EINVAL;
1168                         break;
1169                 }
1170                 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1171                         id_list = &stmf_state.stmf_tg_list;
1172                 }
1173                 mutex_enter(&stmf_state.stmf_lock);
1174                 id_entry = stmf_lookup_id(id_list, grpname->name_size,
1175                     grpname->name);
1176                 if (!id_entry)
1177                         ret = ENODEV;
1178                 else {
1179                         stmf_ge_ident_t *grp_entry;
1180                         id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1181                         iocd->stmf_obuf_max_nentries = id_list->id_count;
1182                         n = min(id_list->id_count,
1183                             iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1184                         iocd->stmf_obuf_nentries = n;
1185                         id_entry = id_list->idl_head;
1186                         grp_entry = (stmf_ge_ident_t *)obuf;
1187                         for (i = 0; i < n; i++) {
1188                                 bcopy(id_entry->id_data, grp_entry->ident,
1189                                     id_entry->id_data_size);
1190                                 grp_entry->ident_size = id_entry->id_data_size;
1191                                 id_entry = id_entry->id_next;
1192                                 grp_entry++;
1193                         }
1194                 }
1195                 mutex_exit(&stmf_state.stmf_lock);
1196                 break;
1197 
1198         case STMF_IOCTL_GET_VE_LIST:
1199                 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1200                 mutex_enter(&stmf_state.stmf_lock);
1201                 ve = (stmf_view_op_entry_t *)obuf;
1202                 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1203                     id_entry; id_entry = id_entry->id_next) {
1204                         for (view_entry = (stmf_view_entry_t *)
1205                             id_entry->id_impl_specific; view_entry;
1206                             view_entry = view_entry->ve_next) {
1207                                 iocd->stmf_obuf_max_nentries++;
1208                                 if (iocd->stmf_obuf_nentries >= n)
1209                                         continue;
1210                                 ve->ve_ndx_valid = 1;
1211                                 ve->ve_ndx = view_entry->ve_id;
1212                                 ve->ve_lu_number_valid = 1;
1213                                 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1214                                 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1215                                     view_entry->ve_luid->id_data_size);
1216                                 if (view_entry->ve_hg->id_data[0] == '*') {
1217                                         ve->ve_all_hosts = 1;
1218                                 } else {
1219                                         bcopy(view_entry->ve_hg->id_data,
1220                                             ve->ve_host_group.name,
1221                                             view_entry->ve_hg->id_data_size);
1222                                         ve->ve_host_group.name_size =
1223                                             view_entry->ve_hg->id_data_size;
1224                                 }
1225 
1226                                 if (view_entry->ve_tg->id_data[0] == '*') {
1227                                         ve->ve_all_targets = 1;
1228                                 } else {
1229                                         bcopy(view_entry->ve_tg->id_data,
1230                                             ve->ve_target_group.name,
1231                                             view_entry->ve_tg->id_data_size);
1232                                         ve->ve_target_group.name_size =
1233                                             view_entry->ve_tg->id_data_size;
1234                                 }
1235                                 ve++;
1236                                 iocd->stmf_obuf_nentries++;
1237                         }
1238                 }
1239                 mutex_exit(&stmf_state.stmf_lock);
1240                 break;
1241 
1242         case STMF_IOCTL_LU_VE_LIST:
1243                 p_id = (uint8_t *)ibuf;
1244                 if ((iocd->stmf_ibuf_size != 16) ||
1245                     (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1246                         ret = EINVAL;
1247                         break;
1248                 }
1249 
1250                 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1251                 mutex_enter(&stmf_state.stmf_lock);
1252                 ve = (stmf_view_op_entry_t *)obuf;
1253                 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1254                     id_entry; id_entry = id_entry->id_next) {
1255                         if (bcmp(id_entry->id_data, p_id, 16) != 0)
1256                                 continue;
1257                         for (view_entry = (stmf_view_entry_t *)
1258                             id_entry->id_impl_specific; view_entry;
1259                             view_entry = view_entry->ve_next) {
1260                                 iocd->stmf_obuf_max_nentries++;
1261                                 if (iocd->stmf_obuf_nentries >= n)
1262                                         continue;
1263                                 ve->ve_ndx_valid = 1;
1264                                 ve->ve_ndx = view_entry->ve_id;
1265                                 ve->ve_lu_number_valid = 1;
1266                                 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1267                                 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1268                                     view_entry->ve_luid->id_data_size);
1269                                 if (view_entry->ve_hg->id_data[0] == '*') {
1270                                         ve->ve_all_hosts = 1;
1271                                 } else {
1272                                         bcopy(view_entry->ve_hg->id_data,
1273                                             ve->ve_host_group.name,
1274                                             view_entry->ve_hg->id_data_size);
1275                                         ve->ve_host_group.name_size =
1276                                             view_entry->ve_hg->id_data_size;
1277                                 }
1278 
1279                                 if (view_entry->ve_tg->id_data[0] == '*') {
1280                                         ve->ve_all_targets = 1;
1281                                 } else {
1282                                         bcopy(view_entry->ve_tg->id_data,
1283                                             ve->ve_target_group.name,
1284                                             view_entry->ve_tg->id_data_size);
1285                                         ve->ve_target_group.name_size =
1286                                             view_entry->ve_tg->id_data_size;
1287                                 }
1288                                 ve++;
1289                                 iocd->stmf_obuf_nentries++;
1290                         }
1291                         break;
1292                 }
1293                 mutex_exit(&stmf_state.stmf_lock);
1294                 break;
1295 
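        /*
         * Provider persistent data (PP data) ioctls: load, fetch and clear
         * the nvlist blobs that LU and port providers store through the
         * framework.
         */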
1296         case STMF_IOCTL_LOAD_PP_DATA:
1297                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1298                         ret = EACCES;
1299                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1300                         break;
1301                 }
1302                 ppi = (stmf_ppioctl_data_t *)ibuf;
1303                 if ((ppi == NULL) ||
1304                     (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1305                         ret = EINVAL;
1306                         break;
1307                 }
1308                 /* returned token */
1309                 ppi_token = (uint64_t *)obuf;
1310                 if ((ppi_token == NULL) ||
1311                     (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1312                         ret = EINVAL;
1313                         break;
1314                 }
1315                 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1316                 break;
1317 
1318         case STMF_IOCTL_GET_PP_DATA:
1319                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1320                         ret = EACCES;
1321                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1322                         break;
1323                 }
1324                 ppi = (stmf_ppioctl_data_t *)ibuf;
1325                 if (ppi == NULL ||
1326                     (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1327                         ret = EINVAL;
1328                         break;
1329                 }
1330                 ppi_out = (stmf_ppioctl_data_t *)obuf;
1331                 if ((ppi_out == NULL) ||
1332                     (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1333                         ret = EINVAL;
1334                         break;
1335                 }
1336                 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1337                 break;
1338 
1339         case STMF_IOCTL_CLEAR_PP_DATA:
1340                 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1341                         ret = EACCES;
1342                         iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1343                         break;
1344                 }
1345                 ppi = (stmf_ppioctl_data_t *)ibuf;
1346                 if ((ppi == NULL) ||
1347                     (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1348                         ret = EINVAL;
1349                         break;
1350                 }
1351                 ret = stmf_delete_ppd_ioctl(ppi);
1352                 break;
1353 
1354         case STMF_IOCTL_CLEAR_TRACE:
1355                 stmf_trace_clear();
1356                 break;
1357 
1358         case STMF_IOCTL_ADD_TRACE:
1359                 if (iocd->stmf_ibuf_size && ibuf) {
1360                         ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1361                         stmf_trace("\nstradm", "%s\n", ibuf);
1362                 }
1363                 break;
1364 
1365         case STMF_IOCTL_GET_TRACE_POSITION:
1366                 if (obuf && (iocd->stmf_obuf_size > 3)) {
1367                         mutex_enter(&trace_buf_lock);
1368                         *((int *)obuf) = trace_buf_curndx;
1369                         mutex_exit(&trace_buf_lock);
1370                 } else {
1371                         ret = EINVAL;
1372                 }
1373                 break;
1374 
1375         case STMF_IOCTL_GET_TRACE:
1376                 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1377                         ret = EINVAL;
1378                         break;
1379                 }
1380                 i = *((int *)ibuf);
1381                 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1382                     trace_buf_size)) {
1383                         ret = EINVAL;
1384                         break;
1385                 }
1386                 mutex_enter(&trace_buf_lock);
1387                 bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1388                 mutex_exit(&trace_buf_lock);
1389                 break;
1390 
1391         default:
1392                 ret = ENOTTY;
1393         }
1394 
1395         if (ret == 0) {
1396                 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1397         } else if (iocd->stmf_error) {
1398                 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1399         }
1400         if (obuf) {
1401                 kmem_free(obuf, iocd->stmf_obuf_size);
1402                 obuf = NULL;
1403         }
1404         if (ibuf) {
1405                 kmem_free(ibuf, iocd->stmf_ibuf_size);
1406                 ibuf = NULL;
1407         }
1408         kmem_free(iocd, sizeof (stmf_iocdata_t));
1409         return (ret);
1410 }
1411 
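/*
 * Derive the overall service state from the aggregate of local port and
 * logical unit states. While the service is running, any object that is
 * still onlining yields ONLINING, otherwise ONLINE. When the service is
 * stopped, any object still offlining yields OFFLINING, otherwise OFFLINE.
 */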
1412 static int
1413 stmf_get_service_state(void)
1414 {
1415         stmf_i_local_port_t *ilport;
1416         stmf_i_lu_t *ilu;
1417         int online = 0;
1418         int offline = 0;
1419         int onlining = 0;
1420         int offlining = 0;
1421 
1422         ASSERT(mutex_owned(&stmf_state.stmf_lock));
1423         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1424             ilport = ilport->ilport_next) {
1425                 if (ilport->ilport_state == STMF_STATE_OFFLINE)
1426                         offline++;
1427                 else if (ilport->ilport_state == STMF_STATE_ONLINE)
1428                         online++;
1429                 else if (ilport->ilport_state == STMF_STATE_ONLINING)
1430                         onlining++;
1431                 else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1432                         offlining++;
1433         }
1434 
1435         for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1436             ilu = ilu->ilu_next) {
1437                 if (ilu->ilu_state == STMF_STATE_OFFLINE)
1438                         offline++;
1439                 else if (ilu->ilu_state == STMF_STATE_ONLINE)
1440                         online++;
1441                 else if (ilu->ilu_state == STMF_STATE_ONLINING)
1442                         onlining++;
1443                 else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1444                         offlining++;
1445         }
1446 
1447         if (stmf_state.stmf_service_running) {
1448                 if (onlining)
1449                         return (STMF_STATE_ONLINING);
1450                 else
1451                         return (STMF_STATE_ONLINE);
1452         }
1453 
1454         if (offlining) {
1455                 return (STMF_STATE_OFFLINING);
1456         }
1457 
1458         return (STMF_STATE_OFFLINE);
1459 }
1460 
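/*
 * Change the overall framework state on behalf of the ioctl path. The
 * caller must hold the exclusive open, and the request is rejected while
 * the inventory is locked or a state transition is still in progress.
 * A request for STMF_CONFIG_INIT additionally clears the view and
 * provider-data configuration. Going online brings up every port/LU whose
 * default state is online; going offline takes down everything that is
 * currently online.
 */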
1461 static int
1462 stmf_set_stmf_state(stmf_state_desc_t *std)
1463 {
1464         stmf_i_local_port_t *ilport;
1465         stmf_i_lu_t *ilu;
1466         stmf_state_change_info_t ssi;
1467         int svc_state;
1468 
1469         ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
1470         ssi.st_additional_info = NULL;
1471 
1472         mutex_enter(&stmf_state.stmf_lock);
1473         if (!stmf_state.stmf_exclusive_open) {
1474                 mutex_exit(&stmf_state.stmf_lock);
1475                 return (EACCES);
1476         }
1477 
1478         if (stmf_state.stmf_inventory_locked) {
1479                 mutex_exit(&stmf_state.stmf_lock);
1480                 return (EBUSY);
1481         }
1482 
1483         if ((std->state != STMF_STATE_ONLINE) &&
1484             (std->state != STMF_STATE_OFFLINE)) {
1485                 mutex_exit(&stmf_state.stmf_lock);
1486                 return (EINVAL);
1487         }
1488 
1489         svc_state = stmf_get_service_state();
1490         if ((svc_state == STMF_STATE_OFFLINING) ||
1491             (svc_state == STMF_STATE_ONLINING)) {
1492                 mutex_exit(&stmf_state.stmf_lock);
1493                 return (EBUSY);
1494         }
1495 
1496         if (svc_state == STMF_STATE_OFFLINE) {
1497                 if (std->config_state == STMF_CONFIG_INIT) {
1498                         if (std->state != STMF_STATE_OFFLINE) {
1499                                 mutex_exit(&stmf_state.stmf_lock);
1500                                 return (EINVAL);
1501                         }
1502                         stmf_state.stmf_config_state = STMF_CONFIG_INIT;
1503                         stmf_delete_all_ppds();
1504                         stmf_view_clear_config();
1505                         stmf_view_init();
1506                         mutex_exit(&stmf_state.stmf_lock);
1507                         return (0);
1508                 }
1509                 if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
1510                     (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
1511                         if (std->config_state != STMF_CONFIG_INIT_DONE) {
1512                                 mutex_exit(&stmf_state.stmf_lock);
1513                                 return (EINVAL);
1514                         }
1515                         stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
1516                 }
1517                 if (std->state == STMF_STATE_OFFLINE) {
1518                         mutex_exit(&stmf_state.stmf_lock);
1519                         return (0);
1520                 }
1521                 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
1522                         mutex_exit(&stmf_state.stmf_lock);
1523                         return (EINVAL);
1524                 }
1525                 stmf_state.stmf_inventory_locked = 1;
1526                 stmf_state.stmf_service_running = 1;
1527                 mutex_exit(&stmf_state.stmf_lock);
1528 
1529                 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1530                     ilport = ilport->ilport_next) {
1531                         if (stmf_state.stmf_default_lport_state !=
1532                             STMF_STATE_ONLINE)
1533                                 continue;
1534                         (void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
1535                             ilport->ilport_lport, &ssi);
1536                 }
1537 
1538                 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1539                     ilu = ilu->ilu_next) {
1540                         if (stmf_state.stmf_default_lu_state !=
1541                             STMF_STATE_ONLINE)
1542                                 continue;
1543                         (void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
1544                 }
1545                 mutex_enter(&stmf_state.stmf_lock);
1546                 stmf_state.stmf_inventory_locked = 0;
1547                 mutex_exit(&stmf_state.stmf_lock);
1548                 return (0);
1549         }
1550 
1551         /* svc_state is STMF_STATE_ONLINE here */
1552         if ((std->state != STMF_STATE_OFFLINE) ||
1553             (std->config_state == STMF_CONFIG_INIT)) {
1554                 mutex_exit(&stmf_state.stmf_lock);
1555                 return (EACCES);
1556         }
1557 
1558         stmf_state.stmf_inventory_locked = 1;
1559         stmf_state.stmf_service_running = 0;
1560 
1561         mutex_exit(&stmf_state.stmf_lock);
1562         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1563             ilport = ilport->ilport_next) {
1564                 if (ilport->ilport_state != STMF_STATE_ONLINE)
1565                         continue;
1566                 (void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
1567                     ilport->ilport_lport, &ssi);
1568         }
1569 
1570         for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1571             ilu = ilu->ilu_next) {
1572                 if (ilu->ilu_state != STMF_STATE_ONLINE)
1573                         continue;
1574                 (void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
1575         }
1576         mutex_enter(&stmf_state.stmf_lock);
1577         stmf_state.stmf_inventory_locked = 0;
1578         mutex_exit(&stmf_state.stmf_lock);
1579         return (0);
1580 }
1581 
1582 static int
1583 stmf_get_stmf_state(stmf_state_desc_t *std)
1584 {
1585         mutex_enter(&stmf_state.stmf_lock);
1586         std->state = stmf_get_service_state();
1587         std->config_state = stmf_state.stmf_config_state;
1588         mutex_exit(&stmf_state.stmf_lock);
1589 
1590         return (0);
1591 }
1592 
1593 /*
1594  * handles registration message from pppt for a logical unit
1595  */
1596 stmf_status_t
1597 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1598 {
1599         stmf_i_lu_provider_t    *ilp;
1600         stmf_lu_provider_t      *lp;
1601         mutex_enter(&stmf_state.stmf_lock);
1602         for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1603                 if (strcmp(msg->icrl_lu_provider_name,
1604                     ilp->ilp_lp->lp_name) == 0) {
1605                         lp = ilp->ilp_lp;
1606                         mutex_exit(&stmf_state.stmf_lock);
1607                         lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1608                             msg->icrl_cb_arg_len, type);
1609                         return (STMF_SUCCESS);
1610                 }
1611         }
1612         mutex_exit(&stmf_state.stmf_lock);
1613         return (STMF_SUCCESS);
1614 }
1615 
1616 /*
1617  * handles de-registration message from pppt for a logical unit
1618  */
1619 stmf_status_t
1620 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1621 {
1622         stmf_i_lu_provider_t    *ilp;
1623         stmf_lu_provider_t      *lp;
1624         mutex_enter(&stmf_state.stmf_lock);
1625         for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1626                 if (strcmp(msg->icrl_lu_provider_name,
1627                     ilp->ilp_lp->lp_name) == 0) {
1628                         lp = ilp->ilp_lp;
1629                         mutex_exit(&stmf_state.stmf_lock);
1630                         lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1631                             STMF_MSG_LU_DEREGISTER);
1632                         return (STMF_SUCCESS);
1633                 }
1634         }
1635         mutex_exit(&stmf_state.stmf_lock);
1636         return (STMF_SUCCESS);
1637 }
1638 
1639 /*
1640  * helper function to find a task that matches a task_msgid
1641  */
1642 scsi_task_t *
1643 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1644 {
1645         stmf_i_lu_t *ilu;
1646         stmf_i_scsi_task_t *itask;
1647 
1648         mutex_enter(&stmf_state.stmf_lock);
1649         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1650                 if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1651                         break;
1652                 }
1653         }
1654 
1655         if (ilu == NULL) {
1656                 mutex_exit(&stmf_state.stmf_lock);
1657                 return (NULL);
1658         }
1659 
1660         mutex_enter(&ilu->ilu_task_lock);
1661         for (itask = ilu->ilu_tasks; itask != NULL;
1662             itask = itask->itask_lu_next) {
1663                 mutex_enter(&itask->itask_mutex);
1664                 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1665                     ITASK_BEING_ABORTED)) {
1666                         mutex_exit(&itask->itask_mutex);
1667                         continue;
1668                 }
1669                 mutex_exit(&itask->itask_mutex);
1670                 if (itask->itask_proxy_msg_id == task_msgid) {
1671                         break;
1672                 }
1673         }
1674         mutex_exit(&ilu->ilu_task_lock);
1675         mutex_exit(&stmf_state.stmf_lock);
1676 
1677         if (itask != NULL) {
1678                 return (itask->itask_task);
1679         } else {
1680                 /* task not found. Likely already aborted. */
1681                 return (NULL);
1682         }
1683 }
1684 
1685 /*
1686  * Dispatches a message received from pppt over the interconnect (ic).
1687  * Messages are accepted only while ALUA is enabled and are freed here
1688  * in all cases.
1689  */
1688 stmf_status_t
1689 stmf_msg_rx(stmf_ic_msg_t *msg)
1690 {
1691         mutex_enter(&stmf_state.stmf_lock);
1692         if (stmf_state.stmf_alua_state != 1) {
1693                 mutex_exit(&stmf_state.stmf_lock);
1694                 cmn_err(CE_WARN, "stmf alua state is disabled");
1695                 ic_msg_free(msg);
1696                 return (STMF_FAILURE);
1697         }
1698         mutex_exit(&stmf_state.stmf_lock);
1699 
1700         switch (msg->icm_msg_type) {
1701                 case STMF_ICM_REGISTER_LUN:
1702                         (void) stmf_ic_lu_reg(
1703                             (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1704                             STMF_MSG_LU_REGISTER);
1705                         break;
1706                 case STMF_ICM_LUN_ACTIVE:
1707                         (void) stmf_ic_lu_reg(
1708                             (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1709                             STMF_MSG_LU_ACTIVE);
1710                         break;
1711                 case STMF_ICM_DEREGISTER_LUN:
1712                         (void) stmf_ic_lu_dereg(
1713                             (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1714                         break;
1715                 case STMF_ICM_SCSI_DATA:
1716                         (void) stmf_ic_rx_scsi_data(
1717                             (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1718                         break;
1719                 case STMF_ICM_SCSI_STATUS:
1720                         (void) stmf_ic_rx_scsi_status(
1721                             (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1722                         break;
1723                 case STMF_ICM_STATUS:
1724                         (void) stmf_ic_rx_status(
1725                             (stmf_ic_status_msg_t *)msg->icm_msg);
1726                         break;
1727                 default:
1728                         cmn_err(CE_WARN, "unknown message received %d",
1729                             msg->icm_msg_type);
1730                         ic_msg_free(msg);
1731                         return (STMF_FAILURE);
1732         }
1733         ic_msg_free(msg);
1734         return (STMF_SUCCESS);
1735 }
1736 
1737 stmf_status_t
1738 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1739 {
1740         stmf_i_local_port_t *ilport;
1741 
1742         if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1743                 /* For now, ignore status for other message types. */
1744                 return (STMF_SUCCESS);
1745         }
1746 
1747         if (msg->ics_status != STMF_SUCCESS) {
1748                 return (STMF_SUCCESS);
1749         }
1750 
1751         mutex_enter(&stmf_state.stmf_lock);
1752         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1753             ilport = ilport->ilport_next) {
1754                 if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1755                         ilport->ilport_proxy_registered = 1;
1756                         break;
1757                 }
1758         }
1759         mutex_exit(&stmf_state.stmf_lock);
1760         return (STMF_SUCCESS);
1761 }
1762 
1763 /*
1764  * handles scsi status message from pppt
1765  */
1766 stmf_status_t
1767 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1768 {
1769         scsi_task_t *task;
1770 
1771         /* Task management commands (TM bit set) are not handled here. */
1772         if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1773                 return (STMF_SUCCESS);
1774         }
1775 
1776         task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1777 
1778         if (task == NULL) {
1779                 return (STMF_SUCCESS);
1780         }
1781 
1782         task->task_scsi_status = msg->icss_status;
1783         task->task_sense_data = msg->icss_sense;
1784         task->task_sense_length = msg->icss_sense_len;
1785         (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1786 
1787         return (STMF_SUCCESS);
1788 }
1789 
1790 /*
1791  * handles scsi data message from pppt
1792  */
1793 stmf_status_t
1794 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1795 {
1796         stmf_i_scsi_task_t *itask;
1797         scsi_task_t *task;
1798         stmf_xfer_data_t *xd = NULL;
1799         stmf_data_buf_t *dbuf;
1800         uint32_t sz, minsz, xd_sz, asz;
1801 
1802         /* Task management commands (TM bit set) are not handled here. */
1803         if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1804                 return (STMF_SUCCESS);
1805         }
1806 
1807         task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1808         if (task == NULL) {
1809                 stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1810                 static uint64_t data_msg_id;
1811                 stmf_status_t ic_ret = STMF_FAILURE;
1812                 mutex_enter(&stmf_state.stmf_lock);
1813                 data_msg_id = stmf_proxy_msg_id++;
1814                 mutex_exit(&stmf_state.stmf_lock);
1815                 /*
1816                  * Send a data-transfer-done status to pppt. For now the
1817                  * session id is set to 0 because it cannot be ascertained
1818                  * without the task.
1819                  */
1820                 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1821                     msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1822                 if (ic_xfer_done_msg) {
1823                         ic_ret = ic_tx_msg(ic_xfer_done_msg);
1824                         if (ic_ret != STMF_IC_MSG_SUCCESS) {
1825                                 cmn_err(CE_WARN, "unable to xmit proxy msg");
1826                         }
1827                 }
1828                 return (STMF_FAILURE);
1829         }
1830 
1831         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1832         dbuf = itask->itask_proxy_dbuf;
1833 
1834         task->task_cmd_xfer_length += msg->icsd_data_len;
1835 
1836         if (task->task_additional_flags &
1837             TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1838                 task->task_expected_xfer_length =
1839                     task->task_cmd_xfer_length;
1840         }
1841 
1842         sz = min(task->task_expected_xfer_length,
1843             task->task_cmd_xfer_length);
1844 
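        /*
         * Wrap the proxied data in a transfer descriptor. The allocation
         * size presumably subtracts the small placeholder buf[] that is
         * already declared at the end of stmf_xfer_data_t.
         */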
1845         xd_sz = msg->icsd_data_len;
1846         asz = xd_sz + sizeof (*xd) - 4;
1847         xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1848 
1849         if (xd == NULL) {
1850                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1851                     STMF_ALLOC_FAILURE, NULL);
1852                 return (STMF_FAILURE);
1853         }
1854 
1855         xd->alloc_size = asz;
1856         xd->size_left = xd_sz;
1857         bcopy(msg->icsd_data, xd->buf, xd_sz);
1858 
1859         sz = min(sz, xd->size_left);
1860         xd->size_left = sz;
1861         minsz = min(512, sz);
1862 
1863         if (dbuf == NULL)
1864                 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1865         if (dbuf == NULL) {
1866                 kmem_free(xd, xd->alloc_size);
1867                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1868                     STMF_ALLOC_FAILURE, NULL);
1869                 return (STMF_FAILURE);
1870         }
1871         dbuf->db_lu_private = xd;
1872         dbuf->db_relative_offset = task->task_nbytes_transferred;
1873         stmf_xd_to_dbuf(dbuf, 0);
1874 
1875         dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1876         (void) stmf_xfer_data(task, dbuf, 0);
1877         return (STMF_SUCCESS);
1878 }
1879 
1880 stmf_status_t
1881 stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
1882 {
1883         stmf_i_scsi_task_t *itask =
1884             (stmf_i_scsi_task_t *)task->task_stmf_private;
1885         stmf_i_local_port_t *ilport =
1886             (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
1887         stmf_ic_msg_t *ic_cmd_msg;
1888         stmf_ic_msg_status_t ic_ret;
1889         stmf_status_t ret = STMF_FAILURE;
1890 
1891         if (stmf_state.stmf_alua_state != 1) {
1892                 cmn_err(CE_WARN, "stmf alua state is disabled");
1893                 return (STMF_FAILURE);
1894         }
1895 
1896         if (ilport->ilport_proxy_registered == 0) {
1897                 return (STMF_FAILURE);
1898         }
1899 
1900         mutex_enter(&stmf_state.stmf_lock);
1901         itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
1902         mutex_exit(&stmf_state.stmf_lock);
1903         itask->itask_proxy_dbuf = dbuf;
1904 
1905         /*
1906          * STMF now takes over handling of this task, but it still needs
1907          * to be treated differently from other default-handled tasks,
1908          * hence ITASK_PROXY_TASK.
1909          * If this is a task management function, we are simply duplicating
1910          * the command to the peer. Set the TM bit so that we can recognize
1911          * this on return, since we will not be completing the proxied task
1912          * in that case.
1913          */
1914         mutex_enter(&itask->itask_mutex);
1915         if (task->task_mgmt_function) {
1916                 itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
1917         } else {
1918                 if (itask->itask_flags & ITASK_BEING_ABORTED) {
1919                         mutex_exit(&itask->itask_mutex);
1920                         return (STMF_FAILURE);
1921                 }
1922                 itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
1923         }
1924         if (dbuf) {
1925                 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1926                     task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
1927                     itask->itask_proxy_msg_id);
1928         } else {
1929                 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1930                     task, 0, NULL, itask->itask_proxy_msg_id);
1931         }
1932         mutex_exit(&itask->itask_mutex);
1933         if (ic_cmd_msg) {
1934                 ic_ret = ic_tx_msg(ic_cmd_msg);
1935                 if (ic_ret == STMF_IC_MSG_SUCCESS) {
1936                         ret = STMF_SUCCESS;
1937                 }
1938         }
1939         return (ret);
1940 }
1941 
1942 
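/*
 * Lazily load the pppt proxy module and resolve its interconnect entry
 * points with ddi_modsym(). Each lookup is skipped once the corresponding
 * function pointer has been cached, so repeated calls are cheap.
 */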
1943 stmf_status_t
1944 pppt_modload(void)
1945 {
1946         int error;
1947 
1948         if (pppt_mod == NULL && ((pppt_mod =
1949             ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1950                 cmn_err(CE_WARN, "Unable to load pppt");
1951                 return (STMF_FAILURE);
1952         }
1953 
1954         if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1955             (stmf_ic_reg_port_msg_alloc_func_t)
1956             ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1957             &error)) == NULL)) {
1958                 cmn_err(CE_WARN,
1959                     "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1960                 return (STMF_FAILURE);
1961         }
1962 
1963 
1964         if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1965             (stmf_ic_dereg_port_msg_alloc_func_t)
1966             ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1967             &error)) == NULL)) {
1968                 cmn_err(CE_WARN,
1969                     "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1970                 return (STMF_FAILURE);
1971         }
1972 
1973         if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1974             (stmf_ic_reg_lun_msg_alloc_func_t)
1975             ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1976             &error)) == NULL)) {
1977                 cmn_err(CE_WARN,
1978                     "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1979                 return (STMF_FAILURE);
1980         }
1981 
1982         if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1983             (stmf_ic_lun_active_msg_alloc_func_t)
1984             ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1985             &error)) == NULL)) {
1986                 cmn_err(CE_WARN,
1987                     "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1988                 return (STMF_FAILURE);
1989         }
1990 
1991         if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
1992             (stmf_ic_dereg_lun_msg_alloc_func_t)
1993             ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
1994             &error)) == NULL)) {
1995                 cmn_err(CE_WARN,
1996                     "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
1997                 return (STMF_FAILURE);
1998         }
1999 
2000         if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
2001             (stmf_ic_scsi_cmd_msg_alloc_func_t)
2002             ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
2003             &error)) == NULL)) {
2004                 cmn_err(CE_WARN,
2005                     "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
2006                 return (STMF_FAILURE);
2007         }
2008 
2009         if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
2010             ((ic_scsi_data_xfer_done_msg_alloc =
2011             (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
2012             ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
2013             &error)) == NULL)) {
2014                 cmn_err(CE_WARN,
2015                     "Unable to find symbol - "
2016                     "stmf_ic_scsi_data_xfer_done_msg_alloc");
2017                 return (STMF_FAILURE);
2018         }
2019 
2020         if (ic_session_reg_msg_alloc == NULL &&
2021             ((ic_session_reg_msg_alloc =
2022             (stmf_ic_session_create_msg_alloc_func_t)
2023             ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
2024             &error)) == NULL)) {
2025                 cmn_err(CE_WARN,
2026                     "Unable to find symbol - "
2027                     "stmf_ic_session_create_msg_alloc");
2028                 return (STMF_FAILURE);
2029         }
2030 
2031         if (ic_session_dereg_msg_alloc == NULL &&
2032             ((ic_session_dereg_msg_alloc =
2033             (stmf_ic_session_destroy_msg_alloc_func_t)
2034             ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
2035             &error)) == NULL)) {
2036                 cmn_err(CE_WARN,
2037                     "Unable to find symbol - "
2038                     "stmf_ic_session_destroy_msg_alloc");
2039                 return (STMF_FAILURE);
2040         }
2041 
2042         if (ic_tx_msg == NULL && ((ic_tx_msg =
2043             (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
2044             &error)) == NULL)) {
2045                 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
2046                 return (STMF_FAILURE);
2047         }
2048 
2049         if (ic_msg_free == NULL && ((ic_msg_free =
2050             (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
2051             &error)) == NULL)) {
2052                 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
2053                 return (STMF_FAILURE);
2054         }
2055         return (STMF_SUCCESS);
2056 }
2057 
2058 static void
2059 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2060 {
2061         mutex_enter(&stmf_state.stmf_lock);
2062         alua_state->alua_node = stmf_state.stmf_alua_node;
2063         alua_state->alua_state = stmf_state.stmf_alua_state;
2064         mutex_exit(&stmf_state.stmf_lock);
2065 }
2066 
2067 
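/*
 * Enable or disable ALUA. Enabling loads pppt if necessary and registers
 * every existing non-standby, ALUA-capable local port and every active
 * logical unit with the proxy. On a non-zero node the relative target
 * port id counter is rebased so that newly assigned rtpids start at 256.
 */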
2068 static int
2069 stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
2070 {
2071         stmf_i_local_port_t *ilport;
2072         stmf_i_lu_t *ilu;
2073         stmf_lu_t *lu;
2074         stmf_ic_msg_status_t ic_ret;
2075         stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
2076         stmf_local_port_t *lport;
2077         int ret = 0;
2078 
2079         if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
2080                 return (EINVAL);
2081         }
2082 
2083         mutex_enter(&stmf_state.stmf_lock);
2084         if (alua_state->alua_state == 1) {
2085                 if (pppt_modload() == STMF_FAILURE) {
2086                         ret = EIO;
2087                         goto err;
2088                 }
2089                 if (alua_state->alua_node != 0) {
2090                         /* reset existing rtpids to new base */
2091                         stmf_rtpid_counter = 255;
2092                 }
2093                 stmf_state.stmf_alua_node = alua_state->alua_node;
2094                 stmf_state.stmf_alua_state = 1;
2095                 /* register existing local ports with pppt */
2096                 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2097                     ilport = ilport->ilport_next) {
2098                         /* skip standby ports and non-alua participants */
2099                         if (ilport->ilport_standby == 1 ||
2100                             ilport->ilport_alua == 0) {
2101                                 continue;
2102                         }
2103                         if (alua_state->alua_node != 0) {
2104                                 ilport->ilport_rtpid =
2105                                     atomic_inc_16_nv(&stmf_rtpid_counter);
2106                         }
2107                         lport = ilport->ilport_lport;
2108                         ic_reg_port = ic_reg_port_msg_alloc(
2109                             lport->lport_id, ilport->ilport_rtpid,
2110                             0, NULL, stmf_proxy_msg_id);
2111                         if (ic_reg_port) {
2112                                 ic_ret = ic_tx_msg(ic_reg_port);
2113                                 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2114                                         ilport->ilport_reg_msgid =
2115                                             stmf_proxy_msg_id++;
2116                                 } else {
2117                                         cmn_err(CE_WARN,
2118                                             "error on port registration "
2119                                             "port - %s",
2120                                             ilport->ilport_kstat_tgt_name);
2121                                 }
2122                         }
2123                 }
2124                 /* register existing logical units */
2125                 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
2126                     ilu = ilu->ilu_next) {
2127                         if (ilu->ilu_access != STMF_LU_ACTIVE) {
2128                                 continue;
2129                         }
2130                         /* register with proxy module */
2131                         lu = ilu->ilu_lu;
2132                         if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2133                             lu->lu_lp->lp_alua_support) {
2134                                 ilu->ilu_alua = 1;
2135                                 /* allocate the register message */
2136                                 ic_reg_lun = ic_reg_lun_msg_alloc(
2137                                     lu->lu_id->ident, lu->lu_lp->lp_name,
2138                                     lu->lu_proxy_reg_arg_len,
2139                                     (uint8_t *)lu->lu_proxy_reg_arg,
2140                                     stmf_proxy_msg_id);
2141                                 /* send the message */
2142                                 if (ic_reg_lun) {
2143                                         ic_ret = ic_tx_msg(ic_reg_lun);
2144                                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
2145                                                 stmf_proxy_msg_id++;
2146                                         }
2147                                 }
2148                         }
2149                 }
2150         } else {
2151                 stmf_state.stmf_alua_state = 0;
2152         }
2153 
2154 err:
2155         mutex_exit(&stmf_state.stmf_lock);
2156         return (ret);
2157 }
2158 
2159 
2160 typedef struct {
2161         void    *bp;    /* back pointer from internal struct to main struct */
2162         int     alloc_size;
2163 } __istmf_t;
2164 
2165 typedef struct {
2166         __istmf_t       *fp;    /* Framework private */
2167         void            *cp;    /* Caller private */
2168         void            *ss;    /* struct specific */
2169 } __stmf_t;
2170 
2171 static struct {
2172         int shared;
2173         int fw_private;
2174 } stmf_sizes[] = { { 0, 0 },
2175         { GET_STRUCT_SIZE(stmf_lu_provider_t),
2176                 GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
2177         { GET_STRUCT_SIZE(stmf_port_provider_t),
2178                 GET_STRUCT_SIZE(stmf_i_port_provider_t) },
2179         { GET_STRUCT_SIZE(stmf_local_port_t),
2180                 GET_STRUCT_SIZE(stmf_i_local_port_t) },
2181         { GET_STRUCT_SIZE(stmf_lu_t),
2182                 GET_STRUCT_SIZE(stmf_i_lu_t) },
2183         { GET_STRUCT_SIZE(stmf_scsi_session_t),
2184                 GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
2185         { GET_STRUCT_SIZE(scsi_task_t),
2186                 GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
2187         { GET_STRUCT_SIZE(stmf_data_buf_t),
2188                 GET_STRUCT_SIZE(__istmf_t) },
2189         { GET_STRUCT_SIZE(stmf_dbuf_store_t),
2190                 GET_STRUCT_SIZE(__istmf_t) }
2191 
2192 };
2193 
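/*
 * Rough sketch of the single allocation handed back by stmf_alloc()
 * (see also the re-ordering note inside the function):
 *
 *   +---------------+---------------------------+---------------------+
 *   | shared struct | caller private            | framework private   |
 *   | (stmf_xxx_t)  | (additional_size, rounded | (stmf_i_xxx_t or    |
 *   |               |  up to 8 bytes)           |  __istmf_t)         |
 *   +---------------+---------------------------+---------------------+
 *   sh              sh->cp                      sh->fp
 */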
2194 void *
2195 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
2196 {
2197         int stmf_size;
2198         int kmem_flag;
2199         __stmf_t *sh;
2200 
2201         if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
2202                 return (NULL);
2203 
2204         if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
2205                 kmem_flag = KM_NOSLEEP;
2206         } else {
2207                 kmem_flag = KM_SLEEP;
2208         }
2209 
2210         additional_size = (additional_size + 7) & (~7);
2211         stmf_size = stmf_sizes[struct_id].shared +
2212             stmf_sizes[struct_id].fw_private + additional_size;
2213 
2214         if (flags & AF_DONTZERO)
2215                 sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
2216         else
2217                 sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);
2218 
2219         if (sh == NULL)
2220                 return (NULL);
2221 
2222         /*
2223          * Ideally the internal layout used by stmf_alloc would never
2224          * change. However, the original ordering of framework private
2225          * data and caller private data does not allow an sglist to be
2226          * placed in the caller private data, so the two memory segments
2227          * are re-ordered here as a workaround.
2228          * A better solution is to provide a dedicated interface for
2229          * allocating the sglist; once that interface exists, this
2230          * workaround will no longer be needed.
2231          * Until then, the memory segment ordering below must be kept
2232          * as is.
2233          */
2234         sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
2235         sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
2236             stmf_sizes[struct_id].shared + additional_size);
2237 
2238         sh->fp->bp = sh;
2239         /* Just store the total size instead of storing additional size */
2240         sh->fp->alloc_size = stmf_size;
2241 
2242         return (sh);
2243 }
2244 
2245 void
2246 stmf_free(void *ptr)
2247 {
2248         __stmf_t *sh = (__stmf_t *)ptr;
2249 
2250         /*
2251          * So far we don't need any struct-specific processing. If such
2252          * a need ever arises, store the struct id in the framework
2253          * private section and retrieve it here as sh->fp->struct_id.
2254          */
2255         kmem_free(ptr, sh->fp->alloc_size);
2256 }
2257 
2258 /*
2259  * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2260  * framework and returns a pointer to framework private data for the lu.
2261  * Returns NULL if the lu was not found.
2262  */
2263 stmf_i_lu_t *
2264 stmf_lookup_lu(stmf_lu_t *lu)
2265 {
2266         stmf_i_lu_t *ilu;
2267         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2268 
2269         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2270                 if (ilu->ilu_lu == lu)
2271                         return (ilu);
2272         }
2273         return (NULL);
2274 }
2275 
2276 /*
2277  * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2278  * with the framework and returns a pointer to framework private data for
2279  * the lport.
2280  * Returns NULL if the lport was not found.
2281  */
2282 stmf_i_local_port_t *
2283 stmf_lookup_lport(stmf_local_port_t *lport)
2284 {
2285         stmf_i_local_port_t *ilport;
2286         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2287 
2288         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2289             ilport = ilport->ilport_next) {
2290                 if (ilport->ilport_lport == lport)
2291                         return (ilport);
2292         }
2293         return (NULL);
2294 }
2295 
2296 stmf_status_t
2297 stmf_register_lu_provider(stmf_lu_provider_t *lp)
2298 {
2299         stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2300         stmf_pp_data_t *ppd;
2301         uint32_t cb_flags;
2302 
2303         if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
2304                 return (STMF_FAILURE);
2305 
2306         mutex_enter(&stmf_state.stmf_lock);
2307         ilp->ilp_next = stmf_state.stmf_ilplist;
2308         stmf_state.stmf_ilplist = ilp;
2309         stmf_state.stmf_nlps++;
2310 
2311         /* See if we need to do a callback */
2312         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2313                 if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
2314                         break;
2315                 }
2316         }
2317         if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2318                 goto rlp_bail_out;
2319         }
2320         ilp->ilp_ppd = ppd;
2321         ppd->ppd_provider = ilp;
2322         if (lp->lp_cb == NULL)
2323                 goto rlp_bail_out;
2324         ilp->ilp_cb_in_progress = 1;
2325         cb_flags = STMF_PCB_PREG_COMPLETE;
2326         if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2327                 cb_flags |= STMF_PCB_STMF_ONLINING;
2328         mutex_exit(&stmf_state.stmf_lock);
2329         lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2330         mutex_enter(&stmf_state.stmf_lock);
2331         ilp->ilp_cb_in_progress = 0;
2332 
2333 rlp_bail_out:
2334         mutex_exit(&stmf_state.stmf_lock);
2335 
2336         return (STMF_SUCCESS);
2337 }
2338 
2339 stmf_status_t
2340 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2341 {
2342         stmf_i_lu_provider_t    **ppilp;
2343         stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2344 
2345         mutex_enter(&stmf_state.stmf_lock);
2346         if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2347                 mutex_exit(&stmf_state.stmf_lock);
2348                 return (STMF_BUSY);
2349         }
2350         for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2351             ppilp = &((*ppilp)->ilp_next)) {
2352                 if (*ppilp == ilp) {
2353                         *ppilp = ilp->ilp_next;
2354                         stmf_state.stmf_nlps--;
2355                         if (ilp->ilp_ppd) {
2356                                 ilp->ilp_ppd->ppd_provider = NULL;
2357                                 ilp->ilp_ppd = NULL;
2358                         }
2359                         mutex_exit(&stmf_state.stmf_lock);
2360                         return (STMF_SUCCESS);
2361                 }
2362         }
2363         mutex_exit(&stmf_state.stmf_lock);
2364         return (STMF_NOT_FOUND);
2365 }
2366 
2367 stmf_status_t
2368 stmf_register_port_provider(stmf_port_provider_t *pp)
2369 {
2370         stmf_i_port_provider_t *ipp =
2371             (stmf_i_port_provider_t *)pp->pp_stmf_private;
2372         stmf_pp_data_t *ppd;
2373         uint32_t cb_flags;
2374 
2375         if (pp->pp_portif_rev != PORTIF_REV_1)
2376                 return (STMF_FAILURE);
2377 
2378         mutex_enter(&stmf_state.stmf_lock);
2379         ipp->ipp_next = stmf_state.stmf_ipplist;
2380         stmf_state.stmf_ipplist = ipp;
2381         stmf_state.stmf_npps++;
2382         /* See if we need to do a callback */
2383         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2384                 if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
2385                         break;
2386                 }
2387         }
2388         if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2389                 goto rpp_bail_out;
2390         }
2391         ipp->ipp_ppd = ppd;
2392         ppd->ppd_provider = ipp;
2393         if (pp->pp_cb == NULL)
2394                 goto rpp_bail_out;
2395         ipp->ipp_cb_in_progress = 1;
2396         cb_flags = STMF_PCB_PREG_COMPLETE;
2397         if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2398                 cb_flags |= STMF_PCB_STMF_ONLINING;
2399         mutex_exit(&stmf_state.stmf_lock);
2400         pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2401         mutex_enter(&stmf_state.stmf_lock);
2402         ipp->ipp_cb_in_progress = 0;
2403 
2404 rpp_bail_out:
2405         mutex_exit(&stmf_state.stmf_lock);
2406 
2407         return (STMF_SUCCESS);
2408 }
2409 
2410 stmf_status_t
2411 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2412 {
2413         stmf_i_port_provider_t *ipp =
2414             (stmf_i_port_provider_t *)pp->pp_stmf_private;
2415         stmf_i_port_provider_t **ppipp;
2416 
2417         mutex_enter(&stmf_state.stmf_lock);
2418         if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2419                 mutex_exit(&stmf_state.stmf_lock);
2420                 return (STMF_BUSY);
2421         }
2422         for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2423             ppipp = &((*ppipp)->ipp_next)) {
2424                 if (*ppipp == ipp) {
2425                         *ppipp = ipp->ipp_next;
2426                         stmf_state.stmf_npps--;
2427                         if (ipp->ipp_ppd) {
2428                                 ipp->ipp_ppd->ppd_provider = NULL;
2429                                 ipp->ipp_ppd = NULL;
2430                         }
2431                         mutex_exit(&stmf_state.stmf_lock);
2432                         return (STMF_SUCCESS);
2433                 }
2434         }
2435         mutex_exit(&stmf_state.stmf_lock);
2436         return (STMF_NOT_FOUND);
2437 }
2438 
2439 int
2440 stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
2441     uint32_t *err_ret)
2442 {
2443         stmf_i_port_provider_t          *ipp;
2444         stmf_i_lu_provider_t            *ilp;
2445         stmf_pp_data_t                  *ppd;
2446         nvlist_t                        *nv;
2447         int                             s;
2448         int                             ret;
2449 
2450         *err_ret = 0;
2451 
2452         if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2453                 return (EINVAL);
2454         }
2455 
2456         mutex_enter(&stmf_state.stmf_lock);
2457         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2458                 if (ppi->ppi_lu_provider) {
2459                         if (!ppd->ppd_lu_provider)
2460                                 continue;
2461                 } else if (ppi->ppi_port_provider) {
2462                         if (!ppd->ppd_port_provider)
2463                                 continue;
2464                 }
2465                 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2466                         break;
2467         }
2468 
2469         if (ppd == NULL) {
2470                 /* New provider */
2471                 s = strlen(ppi->ppi_name);
2472                 if (s > 254) {
2473                         mutex_exit(&stmf_state.stmf_lock);
2474                         return (EINVAL);
2475                 }
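                /*
                 * Reserve space for the name and its terminating NUL; the
                 * "- 7" presumably offsets the small ppd_name placeholder
                 * that is already part of sizeof (stmf_pp_data_t).
                 */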
2476                 s += sizeof (stmf_pp_data_t) - 7;
2477 
2478                 ppd = kmem_zalloc(s, KM_NOSLEEP);
2479                 if (ppd == NULL) {
2480                         mutex_exit(&stmf_state.stmf_lock);
2481                         return (ENOMEM);
2482                 }
2483                 ppd->ppd_alloc_size = s;
2484                 (void) strcpy(ppd->ppd_name, ppi->ppi_name);
2485 
2486                 /* See if this provider is already registered with the framework */
2487                 if (ppi->ppi_lu_provider) {
2488                         ppd->ppd_lu_provider = 1;
2489                         for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
2490                             ilp = ilp->ilp_next) {
2491                                 if (strcmp(ppi->ppi_name,
2492                                     ilp->ilp_lp->lp_name) == 0) {
2493                                         ppd->ppd_provider = ilp;
2494                                         ilp->ilp_ppd = ppd;
2495                                         break;
2496                                 }
2497                         }
2498                 } else {
2499                         ppd->ppd_port_provider = 1;
2500                         for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
2501                             ipp = ipp->ipp_next) {
2502                                 if (strcmp(ppi->ppi_name,
2503                                     ipp->ipp_pp->pp_name) == 0) {
2504                                         ppd->ppd_provider = ipp;
2505                                         ipp->ipp_ppd = ppd;
2506                                         break;
2507                                 }
2508                         }
2509                 }
2510 
2511                 /* Link this ppd in */
2512                 ppd->ppd_next = stmf_state.stmf_ppdlist;
2513                 stmf_state.stmf_ppdlist = ppd;
2514         }
2515 
2516         /*
2517          * The caller asked for the token to be verified. If another set
2518          * happened after the caller's get, the tokens no longer match
2519          * and the update is rejected.
2520          */
2521         if (ppi->ppi_token_valid) {
2522                 if (ppi->ppi_token != ppd->ppd_token) {
2523                         *err_ret = STMF_IOCERR_PPD_UPDATED;
2524                         mutex_exit(&stmf_state.stmf_lock);
2525                         return (EINVAL);
2526                 }
2527         }
2528 
2529         if ((ret = nvlist_unpack((char *)ppi->ppi_data,
2530             (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
2531                 mutex_exit(&stmf_state.stmf_lock);
2532                 return (ret);
2533         }
2534 
2535         /* Free any existing nvlist and attach the new one to the ppd */
2536         if (ppd->ppd_nv)
2537                 nvlist_free(ppd->ppd_nv);
2538         ppd->ppd_nv = nv;
2539 
2540         /* bump the token so later conditional writes can be validated */
2541         ppd->ppd_token++;
2542         /* return token to caller */
2543         if (ppi_token) {
2544                 *ppi_token = ppd->ppd_token;
2545         }
2546 
2547         /* If there is a provider registered, do the notifications */
2548         if (ppd->ppd_provider) {
2549                 uint32_t cb_flags = 0;
2550 
2551                 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2552                         cb_flags |= STMF_PCB_STMF_ONLINING;
2553                 if (ppi->ppi_lu_provider) {
2554                         ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
2555                         if (ilp->ilp_lp->lp_cb == NULL)
2556                                 goto bail_out;
2557                         ilp->ilp_cb_in_progress = 1;
2558                         mutex_exit(&stmf_state.stmf_lock);
2559                         ilp->ilp_lp->lp_cb(ilp->ilp_lp,
2560                             STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2561                         mutex_enter(&stmf_state.stmf_lock);
2562                         ilp->ilp_cb_in_progress = 0;
2563                 } else {
2564                         ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
2565                         if (ipp->ipp_pp->pp_cb == NULL)
2566                                 goto bail_out;
2567                         ipp->ipp_cb_in_progress = 1;
2568                         mutex_exit(&stmf_state.stmf_lock);
2569                         ipp->ipp_pp->pp_cb(ipp->ipp_pp,
2570                             STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2571                         mutex_enter(&stmf_state.stmf_lock);
2572                         ipp->ipp_cb_in_progress = 0;
2573                 }
2574         }
2575 
2576 bail_out:
2577         mutex_exit(&stmf_state.stmf_lock);
2578 
2579         return (0);
2580 }
2581 
2582 void
2583 stmf_delete_ppd(stmf_pp_data_t *ppd)
2584 {
2585         stmf_pp_data_t **pppd;
2586 
2587         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2588         if (ppd->ppd_provider) {
2589                 if (ppd->ppd_lu_provider) {
2590                         ((stmf_i_lu_provider_t *)
2591                             ppd->ppd_provider)->ilp_ppd = NULL;
2592                 } else {
2593                         ((stmf_i_port_provider_t *)
2594                             ppd->ppd_provider)->ipp_ppd = NULL;
2595                 }
2596                 ppd->ppd_provider = NULL;
2597         }
2598 
2599         for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2600             pppd = &((*pppd)->ppd_next)) {
2601                 if (*pppd == ppd)
2602                         break;
2603         }
2604 
2605         if (*pppd == NULL)
2606                 return;
2607 
2608         *pppd = ppd->ppd_next;
2609         if (ppd->ppd_nv)
2610                 nvlist_free(ppd->ppd_nv);
2611 
2612         kmem_free(ppd, ppd->ppd_alloc_size);
2613 }
2614 
2615 int
2616 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2617 {
2618         stmf_pp_data_t *ppd;
2619         int ret = ENOENT;
2620 
2621         if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2622                 return (EINVAL);
2623         }
2624 
2625         mutex_enter(&stmf_state.stmf_lock);
2626 
2627         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2628                 if (ppi->ppi_lu_provider) {
2629                         if (!ppd->ppd_lu_provider)
2630                                 continue;
2631                 } else if (ppi->ppi_port_provider) {
2632                         if (!ppd->ppd_port_provider)
2633                                 continue;
2634                 }
2635                 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2636                         break;
2637         }
2638 
2639         if (ppd) {
2640                 ret = 0;
2641                 stmf_delete_ppd(ppd);
2642         }
2643         mutex_exit(&stmf_state.stmf_lock);
2644 
2645         return (ret);
2646 }
2647 
2648 int
2649 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
2650     uint32_t *err_ret)
2651 {
2652         stmf_pp_data_t *ppd;
2653         size_t req_size;
2654         int ret = ENOENT;
2655         char *bufp = (char *)ppi_out->ppi_data;
2656 
2657         if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2658                 return (EINVAL);
2659         }
2660 
2661         mutex_enter(&stmf_state.stmf_lock);
2662 
2663         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2664                 if (ppi->ppi_lu_provider) {
2665                         if (!ppd->ppd_lu_provider)
2666                                 continue;
2667                 } else if (ppi->ppi_port_provider) {
2668                         if (!ppd->ppd_port_provider)
2669                                 continue;
2670                 }
2671                 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2672                         break;
2673         }
2674 
2675         if (ppd && ppd->ppd_nv) {
2676                 ppi_out->ppi_token = ppd->ppd_token;
2677                 if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
2678                     NV_ENCODE_XDR)) != 0) {
2679                         goto done;
2680                 }
2681                 ppi_out->ppi_data_size = req_size;
2682                 if (req_size > ppi->ppi_data_size) {
2683                         *err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
2684                         ret = EINVAL;
2685                         goto done;
2686                 }
2687 
2688                 if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
2689                     NV_ENCODE_XDR, 0)) != 0) {
2690                         goto done;
2691                 }
2692                 ret = 0;
2693         }
2694 
2695 done:
2696         mutex_exit(&stmf_state.stmf_lock);
2697 
2698         return (ret);
2699 }
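
     /*
      * stmf_get_ppd_ioctl() above follows the usual two-pass nvlist contract:
      * report the required size back through ppi_data_size, fail with
      * STMF_IOCERR_INSUFFICIENT_BUF/EINVAL when the caller's buffer is too
      * small (the caller presumably retries with a larger buffer), and only
      * then pack.  A stand-alone sketch of that contract; the helper name and
      * the EOVERFLOW error choice are illustrative, not STMF code:
      *
      *        static int
      *        pack_nvlist_xdr(nvlist_t *nvl, char *buf, size_t buflen,
      *            size_t *needp)
      *        {
      *                int err;
      *
      *                if ((err = nvlist_size(nvl, needp, NV_ENCODE_XDR)) != 0)
      *                        return (err);
      *                if (*needp > buflen)
      *                        return (EOVERFLOW);
      *                return (nvlist_pack(nvl, &buf, needp, NV_ENCODE_XDR, 0));
      *        }
      */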
2700 
2701 void
2702 stmf_delete_all_ppds()
2703 {
2704         stmf_pp_data_t *ppd, *nppd;
2705 
2706         ASSERT(mutex_owned(&stmf_state.stmf_lock));
2707         for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2708                 nppd = ppd->ppd_next;
2709                 stmf_delete_ppd(ppd);
2710         }
2711 }
2712 
2713 /*
2714  * 16 is the maximum string length of a protocol_ident; increase
2715  * these sizes if a longer identifier is ever added.
2716  */
2717 #define STMF_KSTAT_LU_SZ        (STMF_GUID_INPUT + 1 + 256)
2718 #define STMF_KSTAT_TGT_SZ       (256 * 2 + 16)
2719 #define STMF_KSTAT_RPORT_DATAMAX        (sizeof (stmf_kstat_rport_info_t) / \
2720                                             sizeof (kstat_named_t))
2721 
2722 /*
2723  * protocol_ident below must match the Protocol Identifier in stmf_ioctl.h.
2724  */
2725 #define MAX_PROTO_STR_LEN       32
2726 
2727 char *protocol_ident[PROTOCOL_ANY] = {
2728         "Fibre Channel",
2729         "Parallel SCSI",
2730         "SSA",
2731         "IEEE_1394",
2732         "SRP",
2733         "iSCSI",
2734         "SAS",
2735         "ADT",
2736         "ATAPI",
2737         "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
2738 };
2739 
2740 /*
2741  * Update the lun wait/run queue count
2742  */
2743 static void
2744 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2745 {
2746         stmf_i_lu_t             *ilu;
2747         kstat_io_t              *kip;
2748 
2749         if (task->task_lu == dlun0)
2750                 return;
2751         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2752         if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2753                 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2754                 if (kip != NULL) {
2755                         func(kip);
2756                 }
2757         }
2758 }
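
     /*
      * The func() argument to stmf_update_kstat_lu_q() above (and to its
      * lport counterpart below) is expected to be one of the kstat_io
      * queue-accounting routines from kstat_queue(9F).  The calls below are
      * an illustrative guess at how the rest of this file drives them; the
      * exact call sites are outside this excerpt:
      *
      *        stmf_update_kstat_lu_q(task, kstat_waitq_enter);
      *        stmf_update_kstat_lport_q(task, kstat_waitq_enter);
      *        ...
      *        stmf_update_kstat_lu_q(task, kstat_waitq_to_runq);
      *        ...
      *        stmf_update_kstat_lu_q(task, kstat_runq_exit);
      */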
2759 
2760 /*
2761  * Update the target (lport) wait/run queue count
2762  */
2763 static void
2764 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2765 {
2766         stmf_i_local_port_t     *ilp;
2767         kstat_io_t              *kip;
2768 
2769         ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2770         if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2771                 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2772                 if (kip != NULL) {
2773                         mutex_enter(ilp->ilport_kstat_io->ks_lock);
2774                         func(kip);
2775                         mutex_exit(ilp->ilport_kstat_io->ks_lock);
2776                 }
2777         }
2778 }
2779 
2780 static void
2781 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2782 {
2783         stmf_i_local_port_t     *ilp;
2784         kstat_io_t              *kip;
2785 
2786         ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2787         if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2788                 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2789                 if (kip != NULL) {
2790                         mutex_enter(ilp->ilport_kstat_io->ks_lock);
2791                         STMF_UPDATE_KSTAT_IO(kip, dbuf);
2792                         mutex_exit(ilp->ilport_kstat_io->ks_lock);
2793                 }
2794         }
2795 }
2796 
2797 static void
2798 stmf_update_kstat_rport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2799 {
2800         stmf_i_scsi_session_t   *iss;
2801         stmf_i_remote_port_t    *irport;
2802         kstat_io_t              *kip;
2803 
2804         iss = task->task_session->ss_stmf_private;
2805         irport = iss->iss_irport;
2806         if (irport->irport_kstat_io != NULL) {
2807                 kip = KSTAT_IO_PTR(irport->irport_kstat_io);
2808                 mutex_enter(irport->irport_kstat_io->ks_lock);
2809                 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2810                 mutex_exit(irport->irport_kstat_io->ks_lock);
2811         }
2812 }
2813 
2814 static void
2815 stmf_update_kstat_rport_estat(scsi_task_t *task)
2816 {
2817         stmf_i_scsi_task_t              *itask;
2818         stmf_i_scsi_session_t           *iss;
2819         stmf_i_remote_port_t            *irport;
2820         stmf_kstat_rport_estat_t        *ks_estat;
2821         hrtime_t                        lat = 0;
2822         uint32_t                        n = 0;
2823 
2824         itask = task->task_stmf_private;
2825         iss = task->task_session->ss_stmf_private;
2826         irport = iss->iss_irport;
2827 
2828         if (irport->irport_kstat_estat == NULL)
2829                 return;
2830 
2831         ks_estat = (stmf_kstat_rport_estat_t *)KSTAT_NAMED_PTR(
2832             irport->irport_kstat_estat);
2833 
2834         mutex_enter(irport->irport_kstat_estat->ks_lock);
2835 
2836         if (task->task_flags & TF_READ_DATA)
2837                 n = atomic_dec_32_nv(&irport->irport_nread_tasks);
2838         else if (task->task_flags & TF_WRITE_DATA)
2839                 n = atomic_dec_32_nv(&irport->irport_nwrite_tasks);
2840 
2841         if (itask->itask_read_xfer > 0) {
2842                 ks_estat->i_nread_tasks.value.ui64++;
2843                 lat = stmf_update_rport_timestamps(
2844                     &irport->irport_rdstart_timestamp,
2845                     &irport->irport_rddone_timestamp, itask);
2846                 if (n == 0)
2847                         ks_estat->i_rport_read_latency.value.ui64 += lat;
2848         } else if ((itask->itask_write_xfer > 0) ||
2849             (task->task_flags & TF_INITIAL_BURST)) {
2850                 ks_estat->i_nwrite_tasks.value.ui64++;
2851                 lat = stmf_update_rport_timestamps(
2852                     &irport->irport_wrstart_timestamp,
2853                     &irport->irport_wrdone_timestamp, itask);
2854                 if (n == 0)
2855                         ks_estat->i_rport_write_latency.value.ui64 += lat;
2856         }
2857 
2858         if (n == 0) {
2859                 if (task->task_flags & TF_READ_DATA) {
2860                         irport->irport_rdstart_timestamp = LLONG_MAX;
2861                         irport->irport_rddone_timestamp = 0;
2862                 } else if (task->task_flags & TF_WRITE_DATA) {
2863                         irport->irport_wrstart_timestamp = LLONG_MAX;
2864                         irport->irport_wrdone_timestamp = 0;
2865                 }
2866         }
2867 
2868         mutex_exit(irport->irport_kstat_estat->ks_lock);
2869 }
2870 
2871 static hrtime_t
2872 stmf_update_rport_timestamps(hrtime_t *start_tstamp, hrtime_t *done_tstamp,
2873     stmf_i_scsi_task_t *itask)
2874 {
2875         *start_tstamp = MIN(*start_tstamp, itask->itask_start_timestamp);
2876         if ((*done_tstamp == 0) &&
2877             (itask->itask_xfer_done_timestamp == 0)) {
2878                 *done_tstamp = *start_tstamp;
2879         } else {
2880                 *done_tstamp = MAX(*done_tstamp,
2881                     itask->itask_xfer_done_timestamp);
2882         }
2883 
2884         return (*done_tstamp - *start_tstamp);
2885 }
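
     /*
      * Worked example of the windowing done by stmf_update_kstat_rport_estat()
      * and stmf_update_rport_timestamps() above, read side, assuming
      * irport_nread_tasks tracks the reads still in flight and exactly two
      * reads overlap on an otherwise idle rport (the hrtime values are
      * illustrative only):
      *
      *        task A: start = 100, xfer done = 400
      *        task B: start = 250, xfer done = 500
      *
      *        A completes first: window start = MIN(LLONG_MAX, 100) = 100
      *                           window done  = MAX(0, 400) = 400
      *                           n != 0, so no latency is charged yet
      *        B completes:       window start = MIN(100, 250) = 100
      *                           window done  = MAX(400, 500) = 500
      *                           n == 0, so 500 - 100 = 400 is added to
      *                           i_rport_read_latency and the window is reset
      *                           (start = LLONG_MAX, done = 0)
      */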
2886 
2887 static void
2888 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2889 {
2890         stmf_i_lu_t             *ilu;
2891         kstat_io_t              *kip;
2892 
2893         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2894         if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2895                 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2896                 if (kip != NULL) {
2897                         mutex_enter(ilu->ilu_kstat_io->ks_lock);
2898                         STMF_UPDATE_KSTAT_IO(kip, dbuf);
2899                         mutex_exit(ilu->ilu_kstat_io->ks_lock);
2900                 }
2901         }
2902 }
2903 
2904 static void
2905 stmf_create_kstat_lu(stmf_i_lu_t *ilu)
2906 {
2907         char                            ks_nm[KSTAT_STRLEN];
2908         stmf_kstat_lu_info_t            *ks_lu;
2909 
2910         /* create kstat lun info */
2911         ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
2912             KM_NOSLEEP);
2913         if (ks_lu == NULL) {
2914                 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2915                 return;
2916         }
2917 
2918         bzero(ks_nm, sizeof (ks_nm));
2919         (void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
2920         if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
2921             ks_nm, "misc", KSTAT_TYPE_NAMED,
2922             sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
2923             KSTAT_FLAG_VIRTUAL)) == NULL) {
2924                 kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
2925                 cmn_err(CE_WARN, "STMF: kstat_create lu failed");
2926                 return;
2927         }
2928 
2929         ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
2930         ilu->ilu_kstat_info->ks_data = ks_lu;
2931 
2932         kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
2933             KSTAT_DATA_STRING);
2934         kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
2935             KSTAT_DATA_STRING);
2936 
2937         /* convert guid to hex string */
2938         int             i;
2939         uint8_t         *p = ilu->ilu_lu->lu_id->ident;
2940         bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
2941         for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
2942                 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
2943         }
2944         kstat_named_setstr(&ks_lu->i_lun_guid,
2945             (const char *)ilu->ilu_ascii_hex_guid);
2946         kstat_named_setstr(&ks_lu->i_lun_alias,
2947             (const char *)ilu->ilu_lu->lu_alias);
2948         kstat_install(ilu->ilu_kstat_info);
2949 
2950         /* create kstat lun io */
2951         bzero(ks_nm, sizeof (ks_nm));
2952         (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
2953         if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2954             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2955                 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
2956                 return;
2957         }
2958         mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
2959         ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
2960         kstat_install(ilu->ilu_kstat_io);
2961 }
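
     /*
      * For reference, the "misc" and "io" kstats created above can be read
      * from userland with libkstat(3LIB).  A rough sketch, assuming
      * STMF_MODULE_NAME expands to "stmf"; the program below is an
      * illustration, not shipped code:
      *
      *        #include <kstat.h>
      *        #include <stdio.h>
      *        #include <string.h>
      *
      *        int
      *        main(void)
      *        {
      *                kstat_ctl_t *kc;
      *                kstat_t *ksp;
      *
      *                if ((kc = kstat_open()) == NULL)
      *                        return (1);
      *                for (ksp = kc->kc_chain; ksp != NULL; ksp = ksp->ks_next) {
      *                        kstat_io_t *kio;
      *
      *                        if (strcmp(ksp->ks_module, "stmf") != 0 ||
      *                            ksp->ks_type != KSTAT_TYPE_IO ||
      *                            strncmp(ksp->ks_name, "stmf_lu_io_", 11) != 0)
      *                                continue;
      *                        if (kstat_read(kc, ksp, NULL) == -1)
      *                                continue;
      *                        kio = KSTAT_IO_PTR(ksp);
      *                        (void) printf("%s: nread=%llu nwritten=%llu\n",
      *                            ksp->ks_name, (u_longlong_t)kio->nread,
      *                            (u_longlong_t)kio->nwritten);
      *                }
      *                (void) kstat_close(kc);
      *                return (0);
      *        }
      */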
2962 
2963 static void
2964 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2965 {
2966         char                            ks_nm[KSTAT_STRLEN];
2967         stmf_kstat_tgt_info_t           *ks_tgt;
2968         int                             id, len;
2969 
2970         /* create kstat lport info */
2971         ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2972             KM_NOSLEEP);
2973         if (ks_tgt == NULL) {
2974                 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2975                 return;
2976         }
2977 
2978         bzero(ks_nm, sizeof (ks_nm));
2979         (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2980         if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2981             0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2982             sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2983             KSTAT_FLAG_VIRTUAL)) == NULL) {
2984                 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2985                 cmn_err(CE_WARN, "STMF: kstat_create target failed");
2986                 return;
2987         }
2988 
2989         ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2990         ilport->ilport_kstat_info->ks_data = ks_tgt;
2991 
2992         kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2993             KSTAT_DATA_STRING);
2994         kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2995             KSTAT_DATA_STRING);
2996         kstat_named_init(&ks_tgt->i_protocol, "protocol",
2997             KSTAT_DATA_STRING);
2998 
2999         /* ident might not be null terminated */
3000         len = ilport->ilport_lport->lport_id->ident_length;
3001         bcopy(ilport->ilport_lport->lport_id->ident,
3002             ilport->ilport_kstat_tgt_name, len);
3003         ilport->ilport_kstat_tgt_name[len] = '\0';
3004         kstat_named_setstr(&ks_tgt->i_tgt_name,
3005             (const char *)ilport->ilport_kstat_tgt_name);
3006         kstat_named_setstr(&ks_tgt->i_tgt_alias,
3007             (const char *)ilport->ilport_lport->lport_alias);
3008         /* protocol */
3009         if ((id = ilport->ilport_lport->lport_id->protocol_id) >= PROTOCOL_ANY) {
3010                 cmn_err(CE_WARN, "STMF: protocol_id out of bound");
3011                 id = PROTOCOL_ANY - 1;  /* index the "UNKNOWN" entries */
3012         }
3013         kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
3014         kstat_install(ilport->ilport_kstat_info);
3015 
3016         /* create kstat lport io */
3017         bzero(ks_nm, sizeof (ks_nm));
3018         (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
3019         if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
3020             ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3021                 cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
3022                 return;
3023         }
3024         mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
3025         ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
3026         kstat_install(ilport->ilport_kstat_io);
3027 }
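
     /*
      * The target statistics created above are plain named and io kstats, so
      * they can also be inspected from userland with kstat(1M), for example
      * (illustrative invocations, again assuming STMF_MODULE_NAME is "stmf"):
      *
      *        # kstat -m stmf -c io
      *        # kstat -m stmf -c misc
      */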
3028 
3029 /*
3030  * Set the asymmetric access state for a logical unit.  The caller is
3031  * responsible for establishing a SCSI unit attention on the
3032  * state change.
3033  */
3034 stmf_status_t
3035 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
3036 {
3037         stmf_i_lu_t *ilu;
3038         uint8_t *p1, *p2;
3039 
3040         if ((access_state != STMF_LU_STANDBY) &&
3041             (access_state != STMF_LU_ACTIVE)) {
3042                 return (STMF_INVALID_ARG);
3043         }
3044 
3045         p1 = &lu->lu_id->ident[0];
3046         mutex_enter(&stmf_state.stmf_lock);
3047         if (stmf_state.stmf_inventory_locked) {
3048                 mutex_exit(&stmf_state.stmf_lock);
3049                 return (STMF_BUSY);
3050         }
3051 
3052         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
3053                 p2 = &ilu->ilu_lu->lu_id->ident[0];
3054                 if (bcmp(p1, p2, 16) == 0) {
3055                         break;
3056                 }
3057         }
3058 
3059         if (!ilu) {
3060                 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3061         } else {
3062                 /*
3063                  * We're changing the access state of an existing logical
3064                  * unit.  Send the proxy registration message for this
3065                  * logical unit if we're in ALUA mode.
3066                  * If the requested state is STMF_LU_ACTIVE, we register
3067                  * this logical unit with the peer.
3068                  * If the requested state is STMF_LU_STANDBY, we abort all
3069                  * tasks for this logical unit.
3070                  */
3071                 if (stmf_state.stmf_alua_state == 1 &&
3072                     access_state == STMF_LU_ACTIVE) {
3073                         stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3074                         stmf_ic_msg_t *ic_reg_lun;
3075                         if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3076                             lu->lu_lp->lp_alua_support) {
3077                                 ilu->ilu_alua = 1;
3078                                 /* allocate the register message */
3079                                 ic_reg_lun = ic_lun_active_msg_alloc(p1,
3080                                     lu->lu_lp->lp_name,
3081                                     lu->lu_proxy_reg_arg_len,
3082                                     (uint8_t *)lu->lu_proxy_reg_arg,
3083                                     stmf_proxy_msg_id);
3084                                 /* send the message */
3085                                 if (ic_reg_lun) {
3086                                         ic_ret = ic_tx_msg(ic_reg_lun);
3087                                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3088                                                 stmf_proxy_msg_id++;
3089                                         }
3090                                 }
3091                         }
3092                 } else if (stmf_state.stmf_alua_state == 1 &&
3093                     access_state == STMF_LU_STANDBY) {
3094                         /* abort all tasks for this lu */
3095                         stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
3096                 }
3097         }
3098 
3099         ilu->ilu_access = access_state;
3100 
3101         mutex_exit(&stmf_state.stmf_lock);
3102         return (STMF_SUCCESS);
3103 }
3104 
3105 
3106 stmf_status_t
3107 stmf_register_lu(stmf_lu_t *lu)
3108 {
3109         stmf_i_lu_t *ilu;
3110         uint8_t *p1, *p2;
3111         stmf_state_change_info_t ssci;
3112         stmf_id_data_t *luid;
3113 
3114         if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
3115             (lu->lu_id->ident_length != 16) ||
3116             ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
3117                 return (STMF_INVALID_ARG);
3118         }
3119         p1 = &lu->lu_id->ident[0];
3120         mutex_enter(&stmf_state.stmf_lock);
3121         if (stmf_state.stmf_inventory_locked) {
3122                 mutex_exit(&stmf_state.stmf_lock);
3123                 return (STMF_BUSY);
3124         }
3125 
3126         for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
3127                 p2 = &ilu->ilu_lu->lu_id->ident[0];
3128                 if (bcmp(p1, p2, 16) == 0) {
3129                         mutex_exit(&stmf_state.stmf_lock);
3130                         return (STMF_ALREADY);
3131                 }
3132         }
3133 
3134         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3135         luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
3136             lu->lu_id->ident_length, lu->lu_id->ident);
3137         if (luid) {
3138                 luid->id_pt_to_object = (void *)ilu;
3139                 ilu->ilu_luid = luid;
3140         }
3141         ilu->ilu_alias = NULL;
3142 
3143         ilu->ilu_next = stmf_state.stmf_ilulist;
3144         ilu->ilu_prev = NULL;
3145         if (ilu->ilu_next)
3146                 ilu->ilu_next->ilu_prev = ilu;
3147         stmf_state.stmf_ilulist = ilu;
3148         stmf_state.stmf_nlus++;
3149         if (lu->lu_lp) {
3150                 ((stmf_i_lu_provider_t *)
3151                     (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
3152         }
3153         ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
3154         STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
3155         cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL);
3156         stmf_create_kstat_lu(ilu);
3157         /*
3158          * Register with the proxy module, if available, when the logical
3159          * unit is in the active state.
3160          */
3161         if (stmf_state.stmf_alua_state == 1 &&
3162             ilu->ilu_access == STMF_LU_ACTIVE) {
3163                 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3164                 stmf_ic_msg_t *ic_reg_lun;
3165                 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3166                     lu->lu_lp->lp_alua_support) {
3167                         ilu->ilu_alua = 1;
3168                         /* allocate the register message */
3169                         ic_reg_lun = ic_reg_lun_msg_alloc(p1,
3170                             lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
3171                             (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
3172                         /* send the message */
3173                         if (ic_reg_lun) {
3174                                 ic_ret = ic_tx_msg(ic_reg_lun);
3175                                 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3176                                         stmf_proxy_msg_id++;
3177                                 }
3178                         }
3179                 }
3180         }
3181         mutex_exit(&stmf_state.stmf_lock);
3182 
3183         /* Check the default state for the LU */
3184         if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
3185                 ilu->ilu_prev_state = STMF_STATE_OFFLINE;
3186         } else {
3187                 ilu->ilu_prev_state = STMF_STATE_ONLINE;
3188                 if (stmf_state.stmf_service_running) {
3189                         ssci.st_rflags = 0;
3190                         ssci.st_additional_info = NULL;
3191                         (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
3192                 }
3193         }
3194 
3195         /* XXX: Generate event */
3196         return (STMF_SUCCESS);
3197 }
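
     /*
      * stmf_register_lu() only accepts a 16-byte NAA IEEE Registered Extended
      * (type 6) identifier, i.e. ident[0] must have 0x6 in its high nibble.
      * A hedged sketch of how an LU provider might build a conforming lu_id
      * before registering; the allocation style and the CODE_SET_BINARY
      * constant are assumptions, and guid[] is presumed to already hold the
      * 16-byte NAA value:
      *
      *        uint8_t guid[16];
      *        scsi_devid_desc_t *did;
      *
      *        did = kmem_zalloc(sizeof (scsi_devid_desc_t) + 16 - 1, KM_SLEEP);
      *        did->ident_type = ID_TYPE_NAA;
      *        did->code_set = CODE_SET_BINARY;
      *        did->ident_length = 16;
      *        bcopy(guid, did->ident, 16);
      *        lu->lu_id = did;
      */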
3198 
3199 stmf_status_t
3200 stmf_deregister_lu(stmf_lu_t *lu)
3201 {
3202         stmf_i_lu_t *ilu;
3203 
3204         mutex_enter(&stmf_state.stmf_lock);
3205         if (stmf_state.stmf_inventory_locked) {
3206                 mutex_exit(&stmf_state.stmf_lock);
3207                 return (STMF_BUSY);
3208         }
3209         ilu = stmf_lookup_lu(lu);
3210         if (ilu == NULL) {
3211                 mutex_exit(&stmf_state.stmf_lock);
3212                 return (STMF_INVALID_ARG);
3213         }
3214         if (ilu->ilu_state == STMF_STATE_OFFLINE) {
3215                 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
3216                 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
3217                         cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
3218                 }
3219                 if (ilu->ilu_ntasks) {
3220                         stmf_i_scsi_task_t *itask, *nitask;
3221 
3222                         nitask = ilu->ilu_tasks;
3223                         do {
3224                                 itask = nitask;
3225                                 nitask = itask->itask_lu_next;
3226                                 lu->lu_task_free(itask->itask_task);
3227                                 stmf_free(itask->itask_task);
3228                         } while (nitask != NULL);
3229 
3230                         ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
3231                         ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
3232                 }
3233                 /* de-register with proxy if available */
3234                 if (ilu->ilu_access == STMF_LU_ACTIVE &&
3235                     stmf_state.stmf_alua_state == 1) {
3236                         /* de-register with proxy module */
3237                         stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3238                         stmf_ic_msg_t *ic_dereg_lun;
3239                         if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3240                             lu->lu_lp->lp_alua_support) {
3241                                 ilu->ilu_alua = 1;
3242                                 /* allocate the de-register message */
3243                                 ic_dereg_lun = ic_dereg_lun_msg_alloc(
3244                                     lu->lu_id->ident, lu->lu_lp->lp_name, 0,
3245                                     NULL, stmf_proxy_msg_id);
3246                                 /* send the message */
3247                                 if (ic_dereg_lun) {
3248                                         ic_ret = ic_tx_msg(ic_dereg_lun);
3249                                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3250                                                 stmf_proxy_msg_id++;
3251                                         }
3252                                 }
3253                         }
3254                 }
3255 
3256                 if (ilu->ilu_next)
3257                         ilu->ilu_next->ilu_prev = ilu->ilu_prev;
3258                 if (ilu->ilu_prev)
3259                         ilu->ilu_prev->ilu_next = ilu->ilu_next;
3260                 else
3261                         stmf_state.stmf_ilulist = ilu->ilu_next;
3262                 stmf_state.stmf_nlus--;
3263 
3264                 if (ilu == stmf_state.stmf_svc_ilu_draining) {
3265                         stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
3266                 }
3267                 if (ilu == stmf_state.stmf_svc_ilu_timing) {
3268                         stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
3269                 }
3270                 if (lu->lu_lp) {
3271                         ((stmf_i_lu_provider_t *)
3272                             (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
3273                 }
3274                 if (ilu->ilu_luid) {
3275                         ((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
3276                             NULL;
3277                         ilu->ilu_luid = NULL;
3278                 }
3279                 STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
3280         } else {
3281                 mutex_exit(&stmf_state.stmf_lock);
3282                 return (STMF_BUSY);
3283         }
3284         if (ilu->ilu_kstat_info) {
3285                 kmem_free(ilu->ilu_kstat_info->ks_data, STMF_KSTAT_LU_SZ);
3286                 kstat_delete(ilu->ilu_kstat_info);
3287         }
3288         if (ilu->ilu_kstat_io) {
3289                 kstat_delete(ilu->ilu_kstat_io);
3290                 mutex_destroy(&ilu->ilu_kstat_lock);
3291         }
3292         cv_destroy(&ilu->ilu_offline_pending_cv);
3293         mutex_exit(&stmf_state.stmf_lock);
3294         return (STMF_SUCCESS);
3295 }
3296 
3297 void
3298 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3299 {
3300         stmf_i_local_port_t *ilport =
3301             (stmf_i_local_port_t *)lport->lport_stmf_private;
3302         ilport->ilport_rtpid = rtpid;
3303         ilport->ilport_standby = 1;
3304 }
3305 
3306 void
3307 stmf_set_port_alua(stmf_local_port_t *lport)
3308 {
3309         stmf_i_local_port_t *ilport =
3310             (stmf_i_local_port_t *)lport->lport_stmf_private;
3311         ilport->ilport_alua = 1;
3312 }
3313 
3314 stmf_status_t
3315 stmf_register_local_port(stmf_local_port_t *lport)
3316 {
3317         stmf_i_local_port_t *ilport;
3318         stmf_state_change_info_t ssci;
3319         int start_workers = 0;
3320 
3321         mutex_enter(&stmf_state.stmf_lock);
3322         if (stmf_state.stmf_inventory_locked) {
3323                 mutex_exit(&stmf_state.stmf_lock);
3324                 return (STMF_BUSY);
3325         }
3326         ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3327         rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3328 
3329         ilport->ilport_instance =
3330             id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3331         if (ilport->ilport_instance == -1) {
3332                 mutex_exit(&stmf_state.stmf_lock);
3333                 return (STMF_FAILURE);
3334         }
3335         ilport->ilport_next = stmf_state.stmf_ilportlist;
3336         ilport->ilport_prev = NULL;
3337         if (ilport->ilport_next)
3338                 ilport->ilport_next->ilport_prev = ilport;
3339         stmf_state.stmf_ilportlist = ilport;
3340         stmf_state.stmf_nlports++;
3341         if (lport->lport_pp) {
3342                 ((stmf_i_port_provider_t *)
3343                     (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3344         }
3345         ilport->ilport_tg =
3346             stmf_lookup_group_for_target(lport->lport_id->ident,
3347             lport->lport_id->ident_length);
3348 
3349         /*
3350          * rtpid will/must be set if this is a standby port.
3351          * Only register ports that are not standby (proxy) ports
3352          * and that are ALUA participants (ilport_alua == 1).
3353          */
3354         if (ilport->ilport_standby == 0) {
3355                 ilport->ilport_rtpid = atomic_inc_16_nv(&stmf_rtpid_counter);
3356         }
3357 
3358         if (stmf_state.stmf_alua_state == 1 &&
3359             ilport->ilport_standby == 0 &&
3360             ilport->ilport_alua == 1) {
3361                 stmf_ic_msg_t *ic_reg_port;
3362                 stmf_ic_msg_status_t ic_ret;
3363                 stmf_local_port_t *lport;
3364                 lport = ilport->ilport_lport;
3365                 ic_reg_port = ic_reg_port_msg_alloc(
3366                     lport->lport_id, ilport->ilport_rtpid,
3367                     0, NULL, stmf_proxy_msg_id);
3368                 if (ic_reg_port) {
3369                         ic_ret = ic_tx_msg(ic_reg_port);
3370                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3371                                 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3372                         } else {
3373                                 cmn_err(CE_WARN, "port registration failed for "
3374                                     "port %s", ilport->ilport_kstat_tgt_name);
3375                         }
3376                 }
3377         }
3378         STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3379         stmf_create_kstat_lport(ilport);
3380         if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3381                 stmf_workers_state = STMF_WORKERS_ENABLING;
3382                 start_workers = 1;
3383         }
3384         mutex_exit(&stmf_state.stmf_lock);
3385 
3386         if (start_workers)
3387                 stmf_worker_init();
3388 
3389         /* Check the default state for the local port */
3390 
3391         if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3392                 ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3393         } else {
3394                 ilport->ilport_prev_state = STMF_STATE_ONLINE;
3395                 if (stmf_state.stmf_service_running) {
3396                         ssci.st_rflags = 0;
3397                         ssci.st_additional_info = NULL;
3398                         (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3399                 }
3400         }
3401 
3402         /* XXX: Generate event */
3403         return (STMF_SUCCESS);
3404 }
3405 
3406 stmf_status_t
3407 stmf_deregister_local_port(stmf_local_port_t *lport)
3408 {
3409         stmf_i_local_port_t *ilport;
3410 
3411         mutex_enter(&stmf_state.stmf_lock);
3412         if (stmf_state.stmf_inventory_locked) {
3413                 mutex_exit(&stmf_state.stmf_lock);
3414                 return (STMF_BUSY);
3415         }
3416 
3417         /* dequeue all object requests from active queue */
3418         stmf_svc_kill_obj_requests(lport);
3419 
3420         ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3421 
3422         /*
3423          * deregister ports that are not standby (proxy)
3424          */
3425         if (stmf_state.stmf_alua_state == 1 &&
3426             ilport->ilport_standby == 0 &&
3427             ilport->ilport_alua == 1) {
3428                 stmf_ic_msg_t *ic_dereg_port;
3429                 stmf_ic_msg_status_t ic_ret;
3430                 ic_dereg_port = ic_dereg_port_msg_alloc(
3431                     lport->lport_id, 0, NULL, stmf_proxy_msg_id);
3432                 if (ic_dereg_port) {
3433                         ic_ret = ic_tx_msg(ic_dereg_port);
3434                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3435                                 stmf_proxy_msg_id++;
3436                         }
3437                 }
3438         }
3439 
3440         if (ilport->ilport_nsessions == 0) {
3441                 if (ilport->ilport_next)
3442                         ilport->ilport_next->ilport_prev = ilport->ilport_prev;
3443                 if (ilport->ilport_prev)
3444                         ilport->ilport_prev->ilport_next = ilport->ilport_next;
3445                 else
3446                         stmf_state.stmf_ilportlist = ilport->ilport_next;
3447                 id_free(stmf_state.stmf_ilport_inst_space,
3448                     ilport->ilport_instance);
3449                 rw_destroy(&ilport->ilport_lock);
3450                 stmf_state.stmf_nlports--;
3451                 if (lport->lport_pp) {
3452                         ((stmf_i_port_provider_t *)
3453                             (lport->lport_pp->pp_stmf_private))->ipp_npps--;
3454                 }
3455                 ilport->ilport_tg = NULL;
3456                 STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
3457         } else {
3458                 mutex_exit(&stmf_state.stmf_lock);
3459                 return (STMF_BUSY);
3460         }
3461         if (ilport->ilport_kstat_info) {
3462                 kmem_free(ilport->ilport_kstat_info->ks_data,
3463                     STMF_KSTAT_TGT_SZ);
3464                 kstat_delete(ilport->ilport_kstat_info);
3465         }
3466         if (ilport->ilport_kstat_io) {
3467                 kstat_delete(ilport->ilport_kstat_io);
3468                 mutex_destroy(&ilport->ilport_kstat_lock);
3469         }
3470         mutex_exit(&stmf_state.stmf_lock);
3471         return (STMF_SUCCESS);
3472 }
3473 
3474 /*
3475  * Rport id/instance mappings remain valid until STMF is unloaded
3476  */
3477 static int
3478 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3479 {
3480         const   stmf_i_remote_port_t    *irport1 = void_irport1;
3481         const   stmf_i_remote_port_t    *irport2 = void_irport2;
3482         int                     result;
3483 
3484         /* Sort by code set then ident */
3485         if (irport1->irport_id->code_set <
3486             irport2->irport_id->code_set) {
3487                 return (-1);
3488         } else if (irport1->irport_id->code_set >
3489             irport2->irport_id->code_set) {
3490                 return (1);
3491         }
3492 
3493         /* Next by ident length */
3494         if (irport1->irport_id->ident_length <
3495             irport2->irport_id->ident_length) {
3496                 return (-1);
3497         } else if (irport1->irport_id->ident_length >
3498             irport2->irport_id->ident_length) {
3499                 return (1);
3500         }
3501 
3502         /* Code set and ident length both match, now compare idents */
3503         result = memcmp(irport1->irport_id->ident,
3504             irport2->irport_id->ident,
3505             irport1->irport_id->ident_length);
3506 
3507         if (result < 0) {
3508                 return (-1);
3509         } else if (result > 0) {
3510                 return (1);
3511         }
3512 
3513         return (0);
3514 }
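
     /*
      * stmf_irport_compare() is the ordering function for
      * stmf_state.stmf_irportlist.  The AVL tree itself is created in the
      * attach path, outside this excerpt; its setup presumably looks like the
      * sketch below, where the avl_node_t member name (irport_ln) is an
      * assumption:
      *
      *        avl_create(&stmf_state.stmf_irportlist, stmf_irport_compare,
      *            sizeof (stmf_i_remote_port_t),
      *            offsetof(stmf_i_remote_port_t, irport_ln));
      */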
3515 
3516 static stmf_i_remote_port_t *
3517 stmf_irport_create(scsi_devid_desc_t *rport_devid)
3518 {
3519         int                     alloc_len;
3520         stmf_i_remote_port_t    *irport;
3521 
3522         /*
3523          * The caller must hold stmf_state.stmf_lock.  A new rport
3524          * context is created with its refcnt initialized to 1.
3525          */
3526         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3527 
3528         alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3529             rport_devid->ident_length - 1;
3530         irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
3531         if (irport == NULL) {
3532                 return (NULL);
3533         }
3534 
3535         irport->irport_instance =
3536             id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
3537         if (irport->irport_instance == -1) {
3538                 kmem_free(irport, alloc_len);
3539                 return (NULL);
3540         }
3541 
3542         irport->irport_id =
3543             (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
3544         bcopy(rport_devid, irport->irport_id,
3545             sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
3546         irport->irport_refcnt = 1;
3547         mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
3548         irport->irport_rdstart_timestamp = LLONG_MAX;
3549         irport->irport_wrstart_timestamp = LLONG_MAX;
3550 
3551         return (irport);
3552 }
3553 
3554 static void
3555 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3556 {
3557         stmf_destroy_kstat_rport(irport);
3558         id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3559         mutex_destroy(&irport->irport_mutex);
3560         kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3561             irport->irport_id->ident_length - 1);
3562 }
3563 
3564 static void
3565 stmf_create_kstat_rport(stmf_i_remote_port_t *irport)
3566 {
3567         scsi_devid_desc_t *id = irport->irport_id;
3568         char ks_nm[KSTAT_STRLEN];
3569         stmf_kstat_rport_info_t *ks_info;
3570         stmf_kstat_rport_estat_t *ks_estat;
3571         char *ident = NULL;
3572 
3573         ks_info = kmem_zalloc(sizeof (*ks_info), KM_NOSLEEP);
3574         if (ks_info == NULL)
3575                 goto err_out;
3576 
3577         (void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_%"PRIxPTR"",
3578             (uintptr_t)irport);
3579         irport->irport_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
3580             ks_nm, "misc", KSTAT_TYPE_NAMED,
3581             STMF_KSTAT_RPORT_DATAMAX - STMF_RPORT_INFO_LIMIT,
3582             KSTAT_FLAG_VIRTUAL | KSTAT_FLAG_VAR_SIZE);
3583         if (irport->irport_kstat_info == NULL) {
3584                 kmem_free(ks_info, sizeof (*ks_info));
3585                 goto err_out;
3586         }
3587 
3588         irport->irport_kstat_info->ks_data = ks_info;
3589         irport->irport_kstat_info->ks_private = irport;
3590         irport->irport_kstat_info->ks_update = stmf_kstat_rport_update;
3591         ident = kmem_alloc(id->ident_length + 1, KM_NOSLEEP);
3592         if (ident == NULL) {
3593                 kstat_delete(irport->irport_kstat_info);
3594                 irport->irport_kstat_info = NULL;
3595                 kmem_free(ks_info, sizeof (*ks_info));
3596                 goto err_out;
3597         }
3598 
3599         (void) memcpy(ident, id->ident, id->ident_length);
3600         ident[id->ident_length] = '\0';
3601         kstat_named_init(&ks_info->i_rport_name, "name", KSTAT_DATA_STRING);
3602         kstat_named_init(&ks_info->i_protocol, "protocol",
3603             KSTAT_DATA_STRING);
3604 
3605         kstat_named_setstr(&ks_info->i_rport_name, ident);
3606         kstat_named_setstr(&ks_info->i_protocol,
3607             protocol_ident[irport->irport_id->protocol_id]);
3608         irport->irport_kstat_info->ks_lock = &irport->irport_mutex;
3609         irport->irport_info_dirty = B_TRUE;
3610         kstat_install(irport->irport_kstat_info);
3611 
3612         (void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_io_%"PRIxPTR"",
3613             (uintptr_t)irport);
3614         irport->irport_kstat_io = kstat_create(STMF_MODULE_NAME, 0, ks_nm,
3615             "io", KSTAT_TYPE_IO, 1, 0);
3616         if (irport->irport_kstat_io == NULL)
3617                 goto err_out;
3618 
3619         irport->irport_kstat_io->ks_lock = &irport->irport_mutex;
3620         kstat_install(irport->irport_kstat_io);
3621 
3622         (void) snprintf(ks_nm, KSTAT_STRLEN, "stmf_rport_st_%"PRIxPTR"",
3623             (uintptr_t)irport);
3624         irport->irport_kstat_estat = kstat_create(STMF_MODULE_NAME, 0, ks_nm,
3625             "misc", KSTAT_TYPE_NAMED,
3626             sizeof (*ks_estat) / sizeof (kstat_named_t), 0);
3627         if (irport->irport_kstat_estat == NULL)
3628                 goto err_out;
3629 
3630         ks_estat = (stmf_kstat_rport_estat_t *)KSTAT_NAMED_PTR(
3631             irport->irport_kstat_estat);
3632         kstat_named_init(&ks_estat->i_rport_read_latency,
3633             "rlatency", KSTAT_DATA_UINT64);
3634         kstat_named_init(&ks_estat->i_rport_write_latency,
3635             "wlatency", KSTAT_DATA_UINT64);
3636         kstat_named_init(&ks_estat->i_nread_tasks, "rntasks",
3637             KSTAT_DATA_UINT64);
3638         kstat_named_init(&ks_estat->i_nwrite_tasks, "wntasks",
3639             KSTAT_DATA_UINT64);
3640         irport->irport_kstat_estat->ks_lock = &irport->irport_mutex;
3641         kstat_install(irport->irport_kstat_estat);
3642 
3643         return;
3644 
3645 err_out:
3646         (void) memcpy(ks_nm, id->ident, MIN(KSTAT_STRLEN - 1,
3647             id->ident_length));
3648         ks_nm[MIN(KSTAT_STRLEN - 1, id->ident_length)] = '\0';
3649         cmn_err(CE_WARN, "STMF: remote port kstat creation failed: %s", ks_nm);
3650 }
3651 
3652 static void
3653 stmf_destroy_kstat_rport(stmf_i_remote_port_t *irport)
3654 {
3655         if (irport->irport_kstat_io != NULL) {
3656                 kstat_delete(irport->irport_kstat_io);
3657         }
3658         if (irport->irport_kstat_info != NULL) {
3659                 stmf_kstat_rport_info_t *ks_info;
3660                 kstat_named_t *knp;
3661                 void *ptr;
3662                 int i;
3663 
3664                 ks_info = (stmf_kstat_rport_info_t *)KSTAT_NAMED_PTR(
3665                     irport->irport_kstat_info);
3666                 kstat_delete(irport->irport_kstat_info);
3667                 ptr = KSTAT_NAMED_STR_PTR(&ks_info->i_rport_name);
3668                 kmem_free(ptr, KSTAT_NAMED_STR_BUFLEN(&ks_info->i_rport_name));
3669 
3670                 for (i = 0, knp = ks_info->i_rport_uinfo;
3671                     i < STMF_RPORT_INFO_LIMIT; i++, knp++) {
3672                         ptr = KSTAT_NAMED_STR_PTR(knp);
3673                         if (ptr != NULL)
3674                                 kmem_free(ptr, KSTAT_NAMED_STR_BUFLEN(knp));
3675                 }
3676                 kmem_free(ks_info, sizeof (*ks_info));
3677         }
3678 }
3679 
3680 static stmf_i_remote_port_t *
3681 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3682 {
3683         stmf_i_remote_port_t    *irport;
3684 
3685         mutex_enter(&stmf_state.stmf_lock);
3686 
3687         /*
3688          * Lookup will bump the refcnt if there's an existing rport
3689          * context for this identifier.
3690          */
3691         if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3692                 mutex_exit(&stmf_state.stmf_lock);
3693                 return (irport);
3694         }
3695 
3696         irport = stmf_irport_create(rport_devid);
3697         if (irport == NULL) {
3698                 mutex_exit(&stmf_state.stmf_lock);
3699                 return (NULL);
3700         }
3701 
3702         stmf_create_kstat_rport(irport);
3703         avl_add(&stmf_state.stmf_irportlist, irport);
3704         mutex_exit(&stmf_state.stmf_lock);
3705 
3706         return (irport);
3707 }
3708 
3709 static stmf_i_remote_port_t *
3710 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3711 {
3712         stmf_i_remote_port_t    *irport;
3713         stmf_i_remote_port_t    tmp_irport;
3714 
3715         ASSERT(mutex_owned(&stmf_state.stmf_lock));
3716         tmp_irport.irport_id = rport_devid;
3717         irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3718         if (irport != NULL) {
3719                 mutex_enter(&irport->irport_mutex);
3720                 irport->irport_refcnt++;
3721                 mutex_exit(&irport->irport_mutex);
3722         }
3723 
3724         return (irport);
3725 }
3726 
3727 static void
3728 stmf_irport_deregister(stmf_i_remote_port_t *irport)
3729 {
3730         /*
3731          * If we were actually going to remove unreferenced remote ports
3732          * we would want to acquire stmf_state.stmf_lock before getting
3733          * the irport mutex.
3734          *
3735          * Instead we're just going to leave it there even if unreferenced.
3736          */
3737         mutex_enter(&irport->irport_mutex);
3738         irport->irport_refcnt--;
3739         mutex_exit(&irport->irport_mutex);
3740 }
3741 
3742 /*
3743  * The port provider has to make sure that session and port
3744  * register/deregister calls are serialized.
3745  */
3746 stmf_status_t
3747 stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3748 {
3749         stmf_i_scsi_session_t *iss;
3750         stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3751             lport->lport_stmf_private;
3752         uint8_t         lun[8];
3753 
3754         /*
3755          * The port state has to be online to register a SCSI session.  It
3756          * is possible that we started an offline operation and a new SCSI
3757          * session started at the same time (in that case we also fail the
3758          * registration).  Any other state is simply a bad port provider
3759          * implementation.
3760          */
3761         if (ilport->ilport_state != STMF_STATE_ONLINE) {
3762                 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
3763                         stmf_trace(lport->lport_alias, "Port is trying to "
3764                             "register a session while the state is neither "
3765                             "online nor offlining");
3766                 }
3767                 return (STMF_FAILURE);
3768         }
3769         bzero(lun, 8);
3770         iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3771         if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
3772                 stmf_trace(lport->lport_alias, "Could not register "
3773                     "remote port during session registration");
3774                 return (STMF_FAILURE);
3775         }
3776 
3777         iss->iss_flags |= ISS_BEING_CREATED;
3778 
3779         if (ss->ss_rport == NULL) {
3780                 iss->iss_flags |= ISS_NULL_TPTID;
3781                 ss->ss_rport = stmf_scsilib_devid_to_remote_port(
3782                     ss->ss_rport_id);
3783                 if (ss->ss_rport == NULL) {
3784                         iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
3785                         stmf_trace(lport->lport_alias, "Device id to "
3786                             "remote port conversion failed");
3787                         return (STMF_FAILURE);
3788                 }
3789         } else {
3790                 if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
3791                     ss->ss_rport->rport_tptid_sz, NULL)) {
3792                         iss->iss_flags &= ~ISS_BEING_CREATED;
3793                         stmf_trace(lport->lport_alias, "Remote port "
3794                             "transport id validation failed");
3795                         return (STMF_FAILURE);
3796                 }
3797         }
3798 
3799         /* sessions use the ilport_lock. No separate lock is required */
3800         iss->iss_lockp = &ilport->ilport_lock;
3801 
3802         if (iss->iss_sm != NULL)
3803                 cmn_err(CE_PANIC, "create lun map called with non NULL map");
3804         iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
3805             KM_SLEEP);
3806 
3807         mutex_enter(&stmf_state.stmf_lock);
3808         rw_enter(&ilport->ilport_lock, RW_WRITER);
3809         (void) stmf_session_create_lun_map(ilport, iss);
3810         ilport->ilport_nsessions++;
3811         iss->iss_next = ilport->ilport_ss_list;
3812         ilport->ilport_ss_list = iss;
3813         rw_exit(&ilport->ilport_lock);
3814         mutex_exit(&stmf_state.stmf_lock);
3815 
3816         iss->iss_creation_time = ddi_get_time();
3817         ss->ss_session_id = atomic_inc_64_nv(&stmf_session_counter);
3818         iss->iss_flags &= ~ISS_BEING_CREATED;
3819         /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
3820         iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
3821         DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
3822             stmf_scsi_session_t *, ss);
3823         return (STMF_SUCCESS);
3824 }
3825 
3826 stmf_status_t
3827 stmf_add_rport_info(stmf_scsi_session_t *ss,
3828     const char *prop_name, const char *prop_value)
3829 {
3830         stmf_i_scsi_session_t *iss = ss->ss_stmf_private;
3831         stmf_i_remote_port_t *irport = iss->iss_irport;
3832         kstat_named_t *knp;
3833         char *s;
3834         int i;
3835 
3836         s = strdup(prop_value);
3837 
3838         mutex_enter(irport->irport_kstat_info->ks_lock);
3839         /* Make sure the caller isn't adding an already existing property */
3840         knp = KSTAT_NAMED_PTR(irport->irport_kstat_info);
3841         for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
3842                 if (KSTAT_NAMED_STR_PTR(knp) == NULL)
3843                         break;
3844 
3845                 ASSERT(strcmp(knp->name, prop_name) != 0);
3846         }
3847 
3848         if (i == STMF_KSTAT_RPORT_DATAMAX) {
3849                 mutex_exit(irport->irport_kstat_info->ks_lock);
3850                 kmem_free(s, strlen(s) + 1);
3851                 return (STMF_FAILURE);
3852         }
3853 
3854         irport->irport_info_dirty = B_TRUE;
3855         kstat_named_init(knp, prop_name, KSTAT_DATA_STRING);
3856         kstat_named_setstr(knp, s);
3857         mutex_exit(irport->irport_kstat_info->ks_lock);
3858 
3859         return (STMF_SUCCESS);
3860 }
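
     /*
      * How a port provider might publish and later retract a per-remote-port
      * property through stmf_add_rport_info()/stmf_remove_rport_info() above;
      * the property name and value are purely hypothetical.  The only failure
      * mode visible above is running out of free kstat slots:
      *
      *        if (stmf_add_rport_info(ss, "login-alias", "host42-hba0") !=
      *            STMF_SUCCESS) {
      *                cmn_err(CE_NOTE, "no free rport info slots");
      *        }
      *        ...
      *        stmf_remove_rport_info(ss, "login-alias");
      */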
3861 
3862 void
3863 stmf_remove_rport_info(stmf_scsi_session_t *ss,
3864     const char *prop_name)
3865 {
3866         stmf_i_scsi_session_t *iss = ss->ss_stmf_private;
3867         stmf_i_remote_port_t *irport = iss->iss_irport;
3868         kstat_named_t *knp;
3869         char *s;
3870         int i;
3871         uint32_t len;
3872 
3873         mutex_enter(irport->irport_kstat_info->ks_lock);
3874         knp = KSTAT_NAMED_PTR(irport->irport_kstat_info);
3875         for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
3876                 if ((knp->name != NULL) && (strcmp(knp->name, prop_name) == 0))
3877                         break;
3878         }
3879 
3880         if (i == STMF_KSTAT_RPORT_DATAMAX) {
3881                 mutex_exit(irport->irport_kstat_info->ks_lock);
3882                 return;
3883         }
3884 
3885         s = KSTAT_NAMED_STR_PTR(knp);
3886         len = KSTAT_NAMED_STR_BUFLEN(knp);
3887 
3888         for (; i < STMF_KSTAT_RPORT_DATAMAX - 1; i++, knp++) {
3889                 kstat_named_init(knp, knp[1].name, KSTAT_DATA_STRING);
3890                 kstat_named_setstr(knp, KSTAT_NAMED_STR_PTR(&knp[1]));
3891         }
3892         kstat_named_init(knp, "", KSTAT_DATA_STRING);
3893 
3894         irport->irport_info_dirty = B_TRUE;
3895         mutex_exit(irport->irport_kstat_info->ks_lock);
3896         kmem_free(s, len);
3897 }
3898 
3899 static int
3900 stmf_kstat_rport_update(kstat_t *ksp, int rw)
3901 {
3902         stmf_i_remote_port_t *irport = ksp->ks_private;
3903         kstat_named_t *knp;
3904         uint_t ndata = 0;
3905         size_t dsize = 0;
3906         int i;
3907 
3908         if (rw == KSTAT_WRITE)
3909                 return (EACCES);
3910 
3911         if (!irport->irport_info_dirty)
3912                 return (0);
3913 
3914         knp = KSTAT_NAMED_PTR(ksp);
3915         for (i = 0; i < STMF_KSTAT_RPORT_DATAMAX; i++, knp++) {
3916                 if (KSTAT_NAMED_STR_PTR(knp) == NULL)
3917                         break;
3918                 ndata++;
3919                 dsize += KSTAT_NAMED_STR_BUFLEN(knp);
3920         }
3921 
3922         ksp->ks_ndata = ndata;
3923         ksp->ks_data_size = sizeof (kstat_named_t) * ndata + dsize;
3924         irport->irport_info_dirty = B_FALSE;
3925 
3926         return (0);
3927 }
3928 
3929 void
3930 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3931 {
3932         stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3933             lport->lport_stmf_private;
3934         stmf_i_scsi_session_t *iss, **ppss;
3935         int found = 0;
3936         stmf_ic_msg_t *ic_session_dereg;
3937         stmf_status_t ic_ret = STMF_FAILURE;
3938         stmf_lun_map_t *sm;
3939         stmf_i_lu_t *ilu;
3940         uint16_t n;
3941         stmf_lun_map_ent_t *ent;
3942 
3943         DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
3944             stmf_scsi_session_t *, ss);
3945 
3946         iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3947         if (ss->ss_rport_alias) {
3948                 ss->ss_rport_alias = NULL;
3949         }
3950 
3951 try_dereg_ss_again:
3952         mutex_enter(&stmf_state.stmf_lock);
3953         atomic_and_32(&iss->iss_flags,
3954             ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
3955         if (iss->iss_flags & ISS_EVENT_ACTIVE) {
3956                 mutex_exit(&stmf_state.stmf_lock);
3957                 delay(1);
3958                 goto try_dereg_ss_again;
3959         }
3960 
3961         /* dereg proxy session if not standby port */
3962         if (stmf_state.stmf_alua_state == 1 &&
3963             ilport->ilport_standby == 0 &&
3964             ilport->ilport_alua == 1) {
3965                 ic_session_dereg = ic_session_dereg_msg_alloc(
3966                     ss, stmf_proxy_msg_id);
3967                 if (ic_session_dereg) {
3968                         ic_ret = ic_tx_msg(ic_session_dereg);
3969                         if (ic_ret == STMF_IC_MSG_SUCCESS) {
3970                                 stmf_proxy_msg_id++;
3971                         }
3972                 }
3973         }
3974 
3975         rw_enter(&ilport->ilport_lock, RW_WRITER);
3976         for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
3977             ppss = &((*ppss)->iss_next)) {
3978                 if (iss == (*ppss)) {
3979                         *ppss = (*ppss)->iss_next;
3980                         found = 1;
3981                         break;
3982                 }
3983         }
3984         if (!found) {
3985                 cmn_err(CE_PANIC, "Deregister session called for non-existent"
3986                     " session");
3987         }
3988         ilport->ilport_nsessions--;
3989 
3990         stmf_irport_deregister(iss->iss_irport);
3991         /*
3992          * Detach the session's LUN map here to avoid conflicting with
3993          * map updates, which only grab stmf_lock.
3994          */
3995         sm = iss->iss_sm;
3996         iss->iss_sm = NULL;
3997         iss->iss_hg = NULL;
3998 
3999         rw_exit(&ilport->ilport_lock);
4000 
4001         if (sm->lm_nentries) {
4002                 for (n = 0; n < sm->lm_nentries; n++) {
4003                         if ((ent = (stmf_lun_map_ent_t *)sm->lm_plus[n])
4004                             != NULL) {
4005                                 if (ent->ent_itl_datap) {
4006                                         stmf_do_itl_dereg(ent->ent_lu,
4007                                             ent->ent_itl_datap,
4008                                             STMF_ITL_REASON_IT_NEXUS_LOSS);
4009                                 }
4010                                 ilu = (stmf_i_lu_t *)
4011                                     ent->ent_lu->lu_stmf_private;
4012                                 atomic_dec_32(&ilu->ilu_ref_cnt);
4013                                 kmem_free(sm->lm_plus[n],
4014                                     sizeof (stmf_lun_map_ent_t));
4015                         }
4016                 }
4017                 kmem_free(sm->lm_plus,
4018                     sizeof (stmf_lun_map_ent_t *) * sm->lm_nentries);
4019         }
4020         kmem_free(sm, sizeof (*sm));
4021 
4022         if (iss->iss_flags & ISS_NULL_TPTID) {
4023                 stmf_remote_port_free(ss->ss_rport);
4024         }
4025 
4026         mutex_exit(&stmf_state.stmf_lock);
4027 }
4028 
4029 
4030 
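     /*
      * Look up a session by its ID across all local ports.  If stay_locked
      * is set, the matching port's ilport_lock is left held (as a writer)
      * for the caller to release.
      */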
4031 stmf_i_scsi_session_t *
4032 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
4033 {
4034         stmf_i_local_port_t *ilport;
4035         stmf_i_scsi_session_t *iss;
4036 
4037         mutex_enter(&stmf_state.stmf_lock);
4038         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
4039             ilport = ilport->ilport_next) {
4040                 rw_enter(&ilport->ilport_lock, RW_WRITER);
4041                 for (iss = ilport->ilport_ss_list; iss != NULL;
4042                     iss = iss->iss_next) {
4043                         if (iss->iss_ss->ss_session_id == session_id) {
4044                                 if (!stay_locked)
4045                                         rw_exit(&ilport->ilport_lock);
4046                                 mutex_exit(&stmf_state.stmf_lock);
4047                                 return (iss);
4048                         }
4049                 }
4050                 rw_exit(&ilport->ilport_lock);
4051         }
4052         mutex_exit(&stmf_state.stmf_lock);
4053         return (NULL);
4054 }
4055 
4056 void
4057 stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
4058 {
4059         stmf_itl_data_t **itlpp;
4060         stmf_i_lu_t *ilu;
4061 
4062         ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);
4063 
4064         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4065         mutex_enter(&ilu->ilu_task_lock);
4066         for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
4067             itlpp = &(*itlpp)->itl_next) {
4068                 if ((*itlpp) == itl)
4069                         break;
4070         }
4071         ASSERT((*itlpp) != NULL);
4072         *itlpp = itl->itl_next;
4073         mutex_exit(&ilu->ilu_task_lock);
4074         lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
4075             (uint32_t)itl->itl_hdlrm_reason);
4076 
4077         kmem_free(itl, sizeof (*itl));
4078 }
4079 
4080 stmf_status_t
4081 stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
4082     stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
4083 {
4084         stmf_itl_data_t *itl;
4085         stmf_i_scsi_session_t *iss;
4086         stmf_lun_map_ent_t *lun_map_ent;
4087         stmf_i_lu_t *ilu;
4088         uint16_t n;
4089 
4090         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4091         if (ss == NULL) {
4092                 iss = stmf_session_id_to_issptr(session_id, 1);
4093                 if (iss == NULL)
4094                         return (STMF_NOT_FOUND);
4095         } else {
4096                 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4097         }
4098 
4099         mutex_enter(&stmf_state.stmf_lock);
4100         rw_enter(iss->iss_lockp, RW_WRITER);
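             /*
              * Extract the 14-bit LUN number from the first two bytes of the
              * 8-byte SCSI LUN field; the top two bits of byte 0 carry the
              * address method and are masked off here.
              */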
4101         n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4102         lun_map_ent = (stmf_lun_map_ent_t *)
4103             stmf_get_ent_from_map(iss->iss_sm, n);
4104         if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
4105                 rw_exit(iss->iss_lockp);
4106                 mutex_exit(&stmf_state.stmf_lock);
4107                 return (STMF_NOT_FOUND);
4108         }
4109         if (lun_map_ent->ent_itl_datap != NULL) {
4110                 rw_exit(iss->iss_lockp);
4111                 mutex_exit(&stmf_state.stmf_lock);
4112                 return (STMF_ALREADY);
4113         }
4114 
4115         itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
4116         if (itl == NULL) {
4117                 rw_exit(iss->iss_lockp);
4118                 mutex_exit(&stmf_state.stmf_lock);
4119                 return (STMF_ALLOC_FAILURE);
4120         }
4121 
4122         itl->itl_ilu = ilu;
4123         itl->itl_session = iss;
4124         itl->itl_counter = 1;
4125         itl->itl_lun = n;
4126         itl->itl_handle = itl_handle;
4127 
4128         mutex_enter(&ilu->ilu_task_lock);
4129         itl->itl_next = ilu->ilu_itl_list;
4130         ilu->ilu_itl_list = itl;
4131         mutex_exit(&ilu->ilu_task_lock);
4132         lun_map_ent->ent_itl_datap = itl;
4133         rw_exit(iss->iss_lockp);
4134         mutex_exit(&stmf_state.stmf_lock);
4135 
4136         return (STMF_SUCCESS);
4137 }
4138 
4139 void
4140 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
4141 {
4142         uint8_t old, new;
4143 
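             /*
              * Atomically mark this ITL as being terminated.  If another
              * thread has already set STMF_ITL_BEING_TERMINATED, that thread
              * owns the teardown and we simply return.
              */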
4144         do {
4145                 old = new = itl->itl_flags;
4146                 if (old & STMF_ITL_BEING_TERMINATED)
4147                         return;
4148                 new |= STMF_ITL_BEING_TERMINATED;
4149         } while (atomic_cas_8(&itl->itl_flags, old, new) != old);
4150         itl->itl_hdlrm_reason = hdlrm_reason;
4151 
4152         ASSERT(itl->itl_counter);
4153 
4154         if (atomic_dec_32_nv(&itl->itl_counter))
4155                 return;
4156 
4157         stmf_release_itl_handle(lu, itl);
4158 }
4159 
4160 stmf_status_t
4161 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
4162 {
4163         stmf_i_lu_t *ilu;
4164         stmf_i_local_port_t *ilport;
4165         stmf_i_scsi_session_t *iss;
4166         stmf_lun_map_t *lm;
4167         stmf_lun_map_ent_t *ent;
4168         uint32_t nmaps, nu;
4169         stmf_itl_data_t **itl_list;
4170         int i;
4171 
4172         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4173 
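             /*
              * Snapshot ilu_ref_cnt and size the itl_list array to it while
              * unlocked; after taking stmf_lock, verify that the count has
              * not changed and retry from the top if it has.
              */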
4174 dereg_itl_start:;
4175         nmaps = ilu->ilu_ref_cnt;
4176         if (nmaps == 0)
4177                 return (STMF_NOT_FOUND);
4178         itl_list = (stmf_itl_data_t **)kmem_zalloc(
4179             nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
4180         mutex_enter(&stmf_state.stmf_lock);
4181         if (nmaps != ilu->ilu_ref_cnt) {
4182                 /* Something changed, start all over */
4183                 mutex_exit(&stmf_state.stmf_lock);
4184                 kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
4185                 goto dereg_itl_start;
4186         }
4187         nu = 0;
4188         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
4189             ilport = ilport->ilport_next) {
4190                 rw_enter(&ilport->ilport_lock, RW_WRITER);
4191                 for (iss = ilport->ilport_ss_list; iss != NULL;
4192                     iss = iss->iss_next) {
4193                         lm = iss->iss_sm;
4194                         if (!lm)
4195                                 continue;
4196                         for (i = 0; i < lm->lm_nentries; i++) {
4197                                 if (lm->lm_plus[i] == NULL)
4198                                         continue;
4199                                 ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4200                                 if ((ent->ent_lu == lu) &&
4201                                     (ent->ent_itl_datap)) {
4202                                         itl_list[nu++] = ent->ent_itl_datap;
4203                                         ent->ent_itl_datap = NULL;
4204                                         if (nu == nmaps) {
4205                                                 rw_exit(&ilport->ilport_lock);
4206                                                 goto dai_scan_done;
4207                                         }
4208                                 }
4209                         } /* lun table for a session */
4210                 } /* sessions */
4211                 rw_exit(&ilport->ilport_lock);
4212         } /* ports */
4213 
4214 dai_scan_done:
4215         mutex_exit(&stmf_state.stmf_lock);
4216 
4217         for (i = 0; i < nu; i++) {
4218                 stmf_do_itl_dereg(lu, itl_list[i],
4219                     STMF_ITL_REASON_DEREG_REQUEST);
4220         }
4221         kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
4222 
4223         return (STMF_SUCCESS);
4224 }
4225 
4226 stmf_data_buf_t *
4227 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4228     uint32_t flags)
4229 {
4230         stmf_i_scsi_task_t *itask =
4231             (stmf_i_scsi_task_t *)task->task_stmf_private;
4232         stmf_local_port_t *lport = task->task_lport;
4233         stmf_data_buf_t *dbuf;
4234         uint8_t ndx;
4235 
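             /*
              * stmf_first_zero maps the allocated-buffer bitmap to the index
              * of its lowest clear bit; 0xff means every dbuf slot for this
              * task is already in use.
              */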
4236         ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4237         if (ndx == 0xff)
4238                 return (NULL);
4239         dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4240             task, size, pminsize, flags);
4241         if (dbuf) {
4242                 task->task_cur_nbufs++;
4243                 itask->itask_allocated_buf_map |= (1 << ndx);
4244                 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4245                 dbuf->db_handle = ndx;
4246                 return (dbuf);
4247         }
4248 
4249         return (NULL);
4250 }
4251 
4252 stmf_status_t
4253 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4254 {
4255         stmf_i_scsi_task_t *itask =
4256             (stmf_i_scsi_task_t *)task->task_stmf_private;
4257         stmf_local_port_t *lport = task->task_lport;
4258         uint8_t ndx;
4259         stmf_status_t ret;
4260 
4261         ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4262         ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4263         ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4264 
4265         if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4266                 return (STMF_FAILURE);
4267         if (lport->lport_ds->ds_setup_dbuf == NULL)
4268                 return (STMF_FAILURE);
4269 
4270         ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4271         if (ndx == 0xff)
4272                 return (STMF_FAILURE);
4273         ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4274         if (ret == STMF_FAILURE)
4275                 return (STMF_FAILURE);
4276         itask->itask_dbufs[ndx] = dbuf;
4277         task->task_cur_nbufs++;
4278         itask->itask_allocated_buf_map |= (1 << ndx);
4279         dbuf->db_handle = ndx;
4280 
4281         return (STMF_SUCCESS);
4282 }
4283 
4284 void
4285 stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4286 {
4287         stmf_i_scsi_task_t *itask =
4288             (stmf_i_scsi_task_t *)task->task_stmf_private;
4289         stmf_local_port_t *lport = task->task_lport;
4290 
4291         ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4292         ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
4293         ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4294 
4295         itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4296         task->task_cur_nbufs--;
4297         lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
4298 }
4299 
4300 void
4301 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4302 {
4303         stmf_i_scsi_task_t *itask =
4304             (stmf_i_scsi_task_t *)task->task_stmf_private;
4305         stmf_local_port_t *lport = task->task_lport;
4306 
4307         itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4308         task->task_cur_nbufs--;
4309         lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
4310 }
4311 
4312 stmf_data_buf_t *
4313 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4314 {
4315         stmf_i_scsi_task_t *itask;
4316 
4317         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4318         if (h > 3)
4319                 return (NULL);
4320         return (itask->itask_dbufs[h]);
4321 }
4322 
4323 /* ARGSUSED */
4324 struct scsi_task *
4325 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
4326     uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
4327 {
4328         stmf_lu_t *lu;
4329         stmf_i_scsi_session_t *iss;
4330         stmf_i_lu_t *ilu;
4331         stmf_i_scsi_task_t *itask;
4332         stmf_i_scsi_task_t **ppitask;
4333         scsi_task_t *task;
4334         uint8_t *l;
4335         stmf_lun_map_ent_t *lun_map_ent;
4336         uint16_t cdb_length;
4337         uint16_t luNbr;
4338         uint8_t new_task = 0;
4339 
4340         /*
4341          * We allocate 7 extra bytes for CDB to provide a cdb pointer which
4342          * is guaranteed to be 8 byte aligned. Some LU providers like OSD
4343          * depend upon this alignment.
4344          */
4345         if (cdb_length_in >= 16)
4346                 cdb_length = cdb_length_in + 7;
4347         else
4348                 cdb_length = 16 + 7;
4349         iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4350         luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4351         rw_enter(iss->iss_lockp, RW_READER);
4352         lun_map_ent =
4353             (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
4354         if (!lun_map_ent) {
4355                 lu = dlun0;
4356         } else {
4357                 lu = lun_map_ent->ent_lu;
4358         }
4359 
4360         ilu = lu->lu_stmf_private;
4361         if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4362                 rw_exit(iss->iss_lockp);
4363                 return (NULL);
4364         }
4365 
4366         /*
4367          * If the LUN is being offlined or is already offline, only
4368          * commands that query the LUN are allowed.  These are handled in
4369          * stmf via the dlun0 vector.  It is possible that a race condition
4370          * will cause other commands to arrive while the LUN is in the
4371          * process of being offlined.  Check for those and just let the
4372          * protocol stack handle the error.
4373          */
4374         if ((ilu->ilu_state == STMF_STATE_OFFLINING) ||
4375             (ilu->ilu_state == STMF_STATE_OFFLINE)) {
4376                 if (lu != dlun0) {
4377                         rw_exit(iss->iss_lockp);
4378                         return (NULL);
4379                 }
4380         }
4381 
4382         do {
4383                 if (ilu->ilu_free_tasks == NULL) {
4384                         new_task = 1;
4385                         break;
4386                 }
4387                 mutex_enter(&ilu->ilu_task_lock);
4388                 for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
4389                     ((*ppitask)->itask_cdb_buf_size < cdb_length);
4390                     ppitask = &((*ppitask)->itask_lu_free_next))
4391                         ;
4392                 if (*ppitask) {
4393                         itask = *ppitask;
4394                         *ppitask = (*ppitask)->itask_lu_free_next;
4395                         ilu->ilu_ntasks_free--;
4396                         if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
4397                                 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4398                 } else {
4399                         new_task = 1;
4400                 }
4401                 mutex_exit(&ilu->ilu_task_lock);
4402         /* CONSTCOND */
4403         } while (0);
4404 
4405         if (!new_task) {
4406                 /*
4407                  * Save the task_cdb pointer and zero the per-command fields.
4408                  * We know task_cdb_length is large enough because of the
4409                  * task selection process above.
4410                  */
4411                 uint8_t *save_cdb;
4412                 uintptr_t t_start, t_end;
4413 
4414                 task = itask->itask_task;
4415                 save_cdb = task->task_cdb;   /* save */
4416                 t_start = (uintptr_t)&task->task_flags;
4417                 t_end = (uintptr_t)&task->task_extended_cmd;
4418                 bzero((void *)t_start, (size_t)(t_end - t_start));
4419                 task->task_cdb = save_cdb;   /* restore */
4420                 itask->itask_ncmds = 0;
4421         } else {
4422                 task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
4423                     cdb_length, AF_FORCE_NOSLEEP);
4424                 if (task == NULL) {
4425                         rw_exit(iss->iss_lockp);
4426                         return (NULL);
4427                 }
4428                 task->task_lu = lu;
4429                 task->task_cdb = (uint8_t *)task->task_port_private;
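                     /* Round the cdb pointer up to the next 8-byte boundary. */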
4430                 if ((ulong_t)(task->task_cdb) & 7ul) {
4431                         task->task_cdb = (uint8_t *)(((ulong_t)
4432                             (task->task_cdb) + 7ul) & ~(7ul));
4433                 }
4434                 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4435                 itask->itask_cdb_buf_size = cdb_length;
4436                 mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
4437                 mutex_init(&itask->itask_mutex, NULL, MUTEX_DRIVER, NULL);
4438         }
4439 
4440         /*
4441          * Since a LUN can be mapped as different LUN ids to different initiator
4442          * groups, we need to set LUN id for a new task and reset LUN id for
4443          * a reused task.
4444          */
4445         l = task->task_lun_no;
4446         l[0] = lun[0];
4447         l[1] = lun[1];
4448         l[2] = lun[2];
4449         l[3] = lun[3];
4450         l[4] = lun[4];
4451         l[5] = lun[5];
4452         l[6] = lun[6];
4453         l[7] = lun[7];
4454 
4455         mutex_enter(&itask->itask_mutex);
4456         task->task_session = ss;
4457         task->task_lport = lport;
4458         task->task_cdb_length = cdb_length_in;
4459         itask->itask_flags = ITASK_IN_TRANSITION;
4460         itask->itask_waitq_time = 0;
4461         itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
4462         itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
4463         itask->itask_read_xfer = itask->itask_write_xfer = 0;
4464         itask->itask_audit_index = 0;
4465         bzero(&itask->itask_audit_records[0],
4466             sizeof (stmf_task_audit_rec_t) * ITASK_TASK_AUDIT_DEPTH);
4467         mutex_exit(&itask->itask_mutex);
4468 
4469         if (new_task) {
4470                 if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
4471                         rw_exit(iss->iss_lockp);
4472                         stmf_free(task);
4473                         return (NULL);
4474                 }
4475                 mutex_enter(&ilu->ilu_task_lock);
4476                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4477                         mutex_exit(&ilu->ilu_task_lock);
4478                         rw_exit(iss->iss_lockp);
4479                         stmf_free(task);
4480                         return (NULL);
4481                 }
4482                 itask->itask_lu_next = ilu->ilu_tasks;
4483                 if (ilu->ilu_tasks)
4484                         ilu->ilu_tasks->itask_lu_prev = itask;
4485                 ilu->ilu_tasks = itask;
4486                 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
4487                 ilu->ilu_ntasks++;
4488                 mutex_exit(&ilu->ilu_task_lock);
4489         }
4490 
4491         itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
4492         atomic_inc_32(itask->itask_ilu_task_cntr);
4493         itask->itask_start_time = ddi_get_lbolt();
4494 
4495         if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
4496             lun_map_ent->ent_itl_datap) != NULL)) {
4497                 atomic_inc_32(&itask->itask_itl_datap->itl_counter);
4498                 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
4499         } else {
4500                 itask->itask_itl_datap = NULL;
4501                 task->task_lu_itl_handle = NULL;
4502         }
4503 
4504         rw_exit(iss->iss_lockp);
4505         return (task);
4506 }
4507 
4508 /* ARGSUSED */
4509 static void
4510 stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
4511 {
4512         stmf_i_scsi_task_t *itask =
4513             (stmf_i_scsi_task_t *)task->task_stmf_private;
4514         stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4515 
4516         ASSERT(rw_lock_held(iss->iss_lockp));
4517         ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
4518         ASSERT((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0);
4519         ASSERT((itask->itask_flags & ITASK_IN_TRANSITION) == 0);
4520         ASSERT((itask->itask_flags & ITASK_KNOWN_TO_LU) == 0);
4521         ASSERT(mutex_owned(&itask->itask_mutex));
4522 
4523         itask->itask_flags = ITASK_IN_FREE_LIST;
4524         itask->itask_ncmds = 0;
4525         itask->itask_proxy_msg_id = 0;
4526         atomic_dec_32(itask->itask_ilu_task_cntr);
4527         itask->itask_worker_next = NULL;
4528         mutex_exit(&itask->itask_mutex);
4529 
4530         mutex_enter(&ilu->ilu_task_lock);
4531         itask->itask_lu_free_next = ilu->ilu_free_tasks;
4532         ilu->ilu_free_tasks = itask;
4533         ilu->ilu_ntasks_free++;
4534         if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
4535                 cv_signal(&ilu->ilu_offline_pending_cv);
4536         mutex_exit(&ilu->ilu_task_lock);
4537 }
4538 
4539 void
4540 stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
4541 {
4542         uint32_t        num_to_release, ndx;
4543         stmf_i_scsi_task_t *itask;
4544         stmf_lu_t       *lu = ilu->ilu_lu;
4545 
4546         ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);
4547 
4548         /* free half of the minimum observed count of free tasks */
4549         num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
4550         if (!num_to_release) {
4551                 return;
4552         }
4553         for (ndx = 0; ndx < num_to_release; ndx++) {
4554                 mutex_enter(&ilu->ilu_task_lock);
4555                 itask = ilu->ilu_free_tasks;
4556                 if (itask == NULL) {
4557                         mutex_exit(&ilu->ilu_task_lock);
4558                         break;
4559                 }
4560                 ilu->ilu_free_tasks = itask->itask_lu_free_next;
4561                 ilu->ilu_ntasks_free--;
4562                 mutex_exit(&ilu->ilu_task_lock);
4563 
4564                 lu->lu_task_free(itask->itask_task);
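                     /* Unlink the task from the per-LU active task list. */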
4565                 mutex_enter(&ilu->ilu_task_lock);
4566                 if (itask->itask_lu_next)
4567                         itask->itask_lu_next->itask_lu_prev =
4568                             itask->itask_lu_prev;
4569                 if (itask->itask_lu_prev)
4570                         itask->itask_lu_prev->itask_lu_next =
4571                             itask->itask_lu_next;
4572                 else
4573                         ilu->ilu_tasks = itask->itask_lu_next;
4574 
4575                 ilu->ilu_ntasks--;
4576                 mutex_exit(&ilu->ilu_task_lock);
4577                 stmf_free(itask->itask_task);
4578         }
4579 }
4580 
4581 /*
4582  * Called with stmf_lock held
4583  */
4584 void
4585 stmf_check_freetask()
4586 {
4587         stmf_i_lu_t *ilu;
4588         clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);
4589 
4590         /* stmf_svc_ilu_draining may get changed after stmf_lock is released */
4591         while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
4592                 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
4593                 if (!ilu->ilu_ntasks_min_free) {
4594                         ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4595                         continue;
4596                 }
4597                 ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4598                 mutex_exit(&stmf_state.stmf_lock);
4599                 stmf_task_lu_check_freelist(ilu);
4600                 /*
4601                  * we do not care about the accuracy of
4602                  * ilu_ntasks_min_free, so we don't lock here
4603                  */
4604                 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4605                 mutex_enter(&stmf_state.stmf_lock);
4606                 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4607                 cv_broadcast(&stmf_state.stmf_cv);
4608                 if (ddi_get_lbolt() >= endtime)
4609                         break;
4610         }
4611 }
4612 
4613 /*
4614  * Since this method is looking for tasks that are stuck, lost, or senile,
4615  * it should be more willing to give up scanning than to block. This
4616  * is why mutex_tryenter is now used instead of the standard mutex_enter.
4617  * There has been at least one case where the following occurred.
4618  *
4619  * 1) The iscsit_deferred() method is trying to register a session and
4620  *    needs the global lock, which is held.
4621  * 2) Another thread, which holds the global lock, is trying to deregister a
4622  *    session and needs the session lock.
4623  * 3) A third thread is allocating an stmf task; it has grabbed the session
4624  *    lock and is trying to grab the lun task lock.
4625  * 4) There's a timeout thread that has the lun task lock and is trying to grab
4626  *    a specific task lock.
4627  * 5) The thread that has the task lock is waiting for the ref count to go to
4628  *    zero.
4629  * 6) There's a task that would drop the count to zero, but it's in the task
4630  *    queue waiting to run and is stuck because #1 is currently blocked.
4631  *
4632  * This method is number 4 in the above chain of events. Had this code
4633  * originally used mutex_tryenter, the chain would have been broken and the
4634  * system wouldn't have hung. That is why this method now uses mutex_tryenter
4635  * instead.
4636  */
4637 /* ---- Only one thread calls stmf_do_ilu_timeouts so no lock required ---- */
4638 typedef struct stmf_bailout_cnt {
4639         int     no_ilu_lock;
4640         int     no_task_lock;
4641         int     tasks_checked;
4642 } stmf_bailout_cnt_t;
4643 
4644 stmf_bailout_cnt_t stmf_bailout;
4645 
4646 static void
4647 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
4648 {
4649         clock_t l = ddi_get_lbolt();
4650         clock_t ps = drv_usectohz(1000000);
4651         stmf_i_scsi_task_t *itask;
4652         scsi_task_t *task;
4653         uint32_t to;
4654 
4655         if (mutex_tryenter(&ilu->ilu_task_lock) == 0) {
4656                 stmf_bailout.no_ilu_lock++;
4657                 return;
4658         }
4659 
4660         for (itask = ilu->ilu_tasks; itask != NULL;
4661             itask = itask->itask_lu_next) {
4662                 if (mutex_tryenter(&itask->itask_mutex) == 0) {
4663                         stmf_bailout.no_task_lock++;
4664                         continue;
4665                 }
4666                 stmf_bailout.tasks_checked++;
4667                 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
4668                     ITASK_BEING_ABORTED)) {
4669                         mutex_exit(&itask->itask_mutex);
4670                         continue;
4671                 }
4672                 task = itask->itask_task;
4673                 if (task->task_timeout == 0)
4674                         to = stmf_default_task_timeout;
4675                 else
4676                         to = task->task_timeout;
4677 
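                     /*
                      * The timeout is treated as seconds: ps is the number of
                      * lbolt ticks per second, so the task has expired once
                      * its start time plus (to * ps) is at or before the
                      * current lbolt value.
                      */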
4678                 if ((itask->itask_start_time + (to * ps)) > l) {
4679                         mutex_exit(&itask->itask_mutex);
4680                         continue;
4681                 }
4682                 mutex_exit(&itask->itask_mutex);
4683                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
4684                     STMF_TIMEOUT, NULL);
4685         }
4686         mutex_exit(&ilu->ilu_task_lock);
4687 }
4688 
4689 /*
4690  * Called with stmf_lock held
4691  */
4692 void
4693 stmf_check_ilu_timing()
4694 {
4695         stmf_i_lu_t *ilu;
4696         clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);
4697 
4698         /* stmf_svc_ilu_timing may get changed after stmf_lock is released */
4699         while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
4700                 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
4701                 if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
4702                         if (ilu->ilu_task_cntr2 == 0) {
4703                                 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
4704                                 continue;
4705                         }
4706                 } else {
4707                         if (ilu->ilu_task_cntr1 == 0) {
4708                                 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
4709                                 continue;
4710                         }
4711                 }
4712                 /*
4713                  * If we are here, it means that tasks on this LU are
4714                  * experiencing some slowdown. We need to check.
4715                  */
4716                 ilu->ilu_flags |= ILU_STALL_DEREGISTER;
4717                 mutex_exit(&stmf_state.stmf_lock);
4718                 stmf_do_ilu_timeouts(ilu);
4719                 mutex_enter(&stmf_state.stmf_lock);
4720                 ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
4721                 cv_broadcast(&stmf_state.stmf_cv);
4722                 if (ddi_get_lbolt() >= endtime)
4723                         break;
4724         }
4725 }
4726 
4727 /*
4728  * Kills all tasks on a lu except tm_task
4729  */
4730 void
4731 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4732 {
4733         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4734         stmf_i_scsi_task_t *itask;
4735 
4736         mutex_enter(&ilu->ilu_task_lock);
4737         for (itask = ilu->ilu_tasks; itask != NULL;
4738             itask = itask->itask_lu_next) {
4739                 mutex_enter(&itask->itask_mutex);
4740                 if (itask->itask_flags & ITASK_IN_FREE_LIST) {
4741                         mutex_exit(&itask->itask_mutex);
4742                         continue;
4743                 }
4744                 mutex_exit(&itask->itask_mutex);
4745                 if (itask->itask_task == tm_task)
4746                         continue;
4747                 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4748         }
4749         mutex_exit(&ilu->ilu_task_lock);
4750 }
4751 
4752 void
4753 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
4754 {
4755         int i;
4756         uint8_t map;
4757 
4758         if ((map = itask->itask_allocated_buf_map) == 0)
4759                 return;
4760         for (i = 0; i < 4; i++) {
4761                 if (map & 1) {
4762                         stmf_data_buf_t *dbuf;
4763 
4764                         dbuf = itask->itask_dbufs[i];
4765                         if (dbuf->db_xfer_start_timestamp) {
4766                                 stmf_lport_xfer_done(itask, dbuf);
4767                         }
4768                         if (dbuf->db_flags & DB_LU_DATA_BUF) {
4769                                 /*
4770                                  * LU needs to clean up buffer.
4771                                  * LU is required to free the buffer
4772                                  * in the xfer_done handler.
4773                                  */
4774                                 scsi_task_t *task = itask->itask_task;
4775                                 stmf_lu_t *lu = task->task_lu;
4776 
4777                                 lu->lu_dbuf_free(task, dbuf);
4778                                 ASSERT(((itask->itask_allocated_buf_map>>i)
4779                                     & 1) == 0); /* must be gone */
4780                         } else {
4781                                 ASSERT(dbuf->db_lu_private == NULL);
4782                                 dbuf->db_lu_private = NULL;
4783                                 lport->lport_ds->ds_free_data_buf(
4784                                     lport->lport_ds, dbuf);
4785                         }
4786                 }
4787                 map >>= 1;
4788         }
4789         itask->itask_allocated_buf_map = 0;
4790 }
4791 
4792 void
4793 stmf_task_free(scsi_task_t *task)
4794 {
4795         stmf_local_port_t *lport = task->task_lport;
4796         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4797             task->task_stmf_private;
4798         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
4799             task->task_session->ss_stmf_private;
4800         stmf_lu_t *lu = task->task_lu;
4801 
4802         stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);
4803         ASSERT(mutex_owned(&itask->itask_mutex));
4804         if ((lu != NULL) && (lu->lu_task_done != NULL))
4805                 lu->lu_task_done(task);
4806         stmf_free_task_bufs(itask, lport);
4807         stmf_itl_task_done(itask);
4808         DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
4809             hrtime_t,
4810             itask->itask_done_timestamp - itask->itask_start_timestamp);
4811         if (itask->itask_itl_datap) {
4812                 if (atomic_dec_32_nv(&itask->itask_itl_datap->itl_counter) ==
4813                     0) {
4814                         stmf_release_itl_handle(task->task_lu,
4815                             itask->itask_itl_datap);
4816                 }
4817         }
4818 
4819         /*
4820          * To prevent a deadlock condition we must release the itask_mutex,
4821          * grab a reader lock on iss_lockp and then reacquire the itask_mutex.
4822          */
4823         mutex_exit(&itask->itask_mutex);
4824         rw_enter(iss->iss_lockp, RW_READER);
4825         mutex_enter(&itask->itask_mutex);
4826 
4827         lport->lport_task_free(task);
4828         if (itask->itask_worker) {
4829                 atomic_dec_32(&stmf_cur_ntasks);
4830                 atomic_dec_32(&itask->itask_worker->worker_ref_count);
4831         }
4832         /*
4833          * After calling stmf_task_lu_free, the task pointer can no longer
4834          * be trusted.
4835          */
4836         stmf_task_lu_free(task, iss);
4837         rw_exit(iss->iss_lockp);
4838 }
4839 
4840 void
4841 stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
4842 {
4843         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
4844             task->task_stmf_private;
4845         stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
4846         int nv;
4847         uint32_t new;
4848         uint32_t ct;
4849         stmf_worker_t *w;
4850         uint8_t tm;
4851 
4852         if (task->task_max_nbufs > 4)
4853                 task->task_max_nbufs = 4;
4854         task->task_cur_nbufs = 0;
4855         /* Latest value of currently running tasks */
4856         ct = atomic_inc_32_nv(&stmf_cur_ntasks);
4857 
4858         /* Select the next worker using round robin */
4859         mutex_enter(&stmf_worker_sel_mx);
4860         stmf_worker_sel_counter++;
4861         if (stmf_worker_sel_counter >= stmf_nworkers)
4862                 stmf_worker_sel_counter = 0;
4863         nv = stmf_worker_sel_counter;
4864 
4865         /* if the selected worker is not idle then bump to the next worker */
4866         if (stmf_workers[nv].worker_queue_depth > 0) {
4867                 stmf_worker_sel_counter++;
4868                 if (stmf_worker_sel_counter >= stmf_nworkers)
4869                         stmf_worker_sel_counter = 0;
4870                 nv = stmf_worker_sel_counter;
4871         }
4872         mutex_exit(&stmf_worker_sel_mx);
4873 
4874         w = &stmf_workers[nv];
4875 
4876         mutex_enter(&itask->itask_mutex);
4877         mutex_enter(&w->worker_lock);
4878 
4879         itask->itask_worker = w;
4880 
4881         /*
4882          * Track max system load inside the worker as we already have the
4883          * worker lock (no point implementing another lock). The service
4884          * thread will do the comparisons and figure out the max overall
4885          * system load.
4886          */
4887         if (w->worker_max_sys_qdepth_pu < ct)
4888                 w->worker_max_sys_qdepth_pu = ct;
4889 
4890         new = itask->itask_flags;
4891         new |= ITASK_KNOWN_TO_TGT_PORT;
4892         if (task->task_mgmt_function) {
4893                 tm = task->task_mgmt_function;
4894                 if ((tm == TM_TARGET_RESET) ||
4895                     (tm == TM_TARGET_COLD_RESET) ||
4896                     (tm == TM_TARGET_WARM_RESET)) {
4897                         new |= ITASK_DEFAULT_HANDLING;
4898                 }
4899         } else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
4900                 new |= ITASK_DEFAULT_HANDLING;
4901         }
4902         new &= ~ITASK_IN_TRANSITION;
4903         itask->itask_flags = new;
4904 
4905         stmf_itl_task_start(itask);
4906 
4907         itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
4908         itask->itask_ncmds = 1;
4909 
4910         if ((task->task_flags & TF_INITIAL_BURST) &&
4911             !(curthread->t_flag & T_INTR_THREAD)) {
4912                 stmf_update_kstat_lu_io(task, dbuf);
4913                 stmf_update_kstat_lport_io(task, dbuf);
4914                 stmf_update_kstat_rport_io(task, dbuf);
4915         }
4916 
4917         stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
4918         if (dbuf) {
4919                 itask->itask_allocated_buf_map = 1;
4920                 itask->itask_dbufs[0] = dbuf;
4921                 dbuf->db_handle = 0;
4922         } else {
4923                 itask->itask_allocated_buf_map = 0;
4924                 itask->itask_dbufs[0] = NULL;
4925         }
4926 
4927         STMF_ENQUEUE_ITASK(w, itask);
4928 
4929         mutex_exit(&w->worker_lock);
4930         mutex_exit(&itask->itask_mutex);
4931 
4932         /*
4933          * This can only happen if, during stmf_task_alloc(), ILU_RESET_ACTIVE
4934          * was set between the check of ILU_RESET_ACTIVE and clearing of the
4935          * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
4936          */
4937         if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4938                 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
4939         }
4940 }
4941 
4942 static void
4943 stmf_task_audit(stmf_i_scsi_task_t *itask,
4944     task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4945 {
4946         stmf_task_audit_rec_t *ar;
4947 
4948         mutex_enter(&itask->itask_audit_mutex);
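             /*
              * Audit records live in a ring buffer; masking the index with
              * (ITASK_TASK_AUDIT_DEPTH - 1) wraps it, which assumes the depth
              * is a power of two.
              */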
4949         ar = &itask->itask_audit_records[itask->itask_audit_index++];
4950         itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4951         ar->ta_event = te;
4952         ar->ta_cmd_or_iof = cmd_or_iof;
4953         ar->ta_itask_flags = itask->itask_flags;
4954         ar->ta_dbuf = dbuf;
4955         gethrestime(&ar->ta_timestamp);
4956         mutex_exit(&itask->itask_audit_mutex);
4957 }
4958 
4959 
4960 /*
4961  * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
4962  * By the time ITASK_BEING_ABORTED is set, ITASK_KNOWN_TO_LU may already
4963  * have been cleared, i.e. before ITASK_BEING_ABORTED was set. But if it
4964  * was not, it cannot be cleared until the LU explicitly calls
4965  * stmf_task_lu_aborted(). Of course the LU will make that call only if we
4966  * call the LU's abort entry point, which we do only if ITASK_KNOWN_TO_LU
4967  * was set.
4968  *
4969  * The same logic applies for the port.
4970  *
4971  * Also, ITASK_BEING_ABORTED will not be allowed to be set if both
4972  * KNOWN_TO_LU and KNOWN_TO_TGT_PORT are already clear.
4973  * +++++++++++++++++++++++++++++++++++++++++++++++
4974  */
4975 
4976 stmf_status_t
4977 stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
4978 {
4979         stmf_status_t ret = STMF_SUCCESS;
4980 
4981         stmf_i_scsi_task_t *itask =
4982             (stmf_i_scsi_task_t *)task->task_stmf_private;
4983 
4984         stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);
4985 
4986         mutex_enter(&itask->itask_mutex);
4987         if (ioflags & STMF_IOF_LU_DONE) {
4988                 if (itask->itask_flags & ITASK_BEING_ABORTED) {
4989                         mutex_exit(&itask->itask_mutex);
4990                         return (STMF_ABORTED);
4991                 }
4992                 itask->itask_flags &= ~ITASK_KNOWN_TO_LU;
4993         }
4994         if ((itask->itask_flags & ITASK_BEING_ABORTED) != 0) {
4995                 mutex_exit(&itask->itask_mutex);
4996                 return (STMF_ABORTED);
4997         }
4998         mutex_exit(&itask->itask_mutex);
4999 
5000 #ifdef  DEBUG
5001         if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
5002                 if (atomic_dec_32_nv((uint32_t *)&stmf_drop_buf_counter) == 1)
5003                         return (STMF_SUCCESS);
5004         }
5005 #endif
5006 
5007         stmf_update_kstat_lu_io(task, dbuf);
5008         stmf_update_kstat_lport_io(task, dbuf);
5009         stmf_update_kstat_rport_io(task, dbuf);
5010         stmf_lport_xfer_start(itask, dbuf);
5011         if (ioflags & STMF_IOF_STATS_ONLY) {
5012                 stmf_lport_xfer_done(itask, dbuf);
5013                 return (STMF_SUCCESS);
5014         }
5015 
5016         dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
5017         ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);
5018 
5019         /*
5020          * Port provider may have already called the buffer callback in
5021          * which case dbuf->db_xfer_start_timestamp will be 0.
5022          */
5023         if (ret != STMF_SUCCESS) {
5024                 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
5025                 if (dbuf->db_xfer_start_timestamp != 0)
5026                         stmf_lport_xfer_done(itask, dbuf);
5027         }
5028 
5029         return (ret);
5030 }
5031 
5032 void
5033 stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
5034 {
5035         stmf_i_scsi_task_t *itask =
5036             (stmf_i_scsi_task_t *)task->task_stmf_private;
5037         stmf_i_local_port_t *ilport;
5038         stmf_worker_t *w = itask->itask_worker;
5039         uint32_t new;
5040         uint8_t update_queue_flags, free_it, queue_it;
5041 
5042         stmf_lport_xfer_done(itask, dbuf);
5043 
5044         stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);
5045 
5046         /* Guard against unexpected completions from the lport */
5047         if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
5048                 dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
5049         } else {
5050                 /*
5051                  * This should never happen.
5052                  */
5053                 ilport = task->task_lport->lport_stmf_private;
5054                 ilport->ilport_unexpected_comp++;
5055                 cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
5056                     (void *)task, (void *)dbuf);
5057                 return;
5058         }
5059 
5060         mutex_enter(&itask->itask_mutex);
5061         mutex_enter(&w->worker_lock);
5062         new = itask->itask_flags;
5063         if (itask->itask_flags & ITASK_BEING_ABORTED) {
5064                 mutex_exit(&w->worker_lock);
5065                 mutex_exit(&itask->itask_mutex);
5066                 return;
5067         }
5068         free_it = 0;
5069         if (iof & STMF_IOF_LPORT_DONE) {
5070                 new &= ~ITASK_KNOWN_TO_TGT_PORT;
5071                 task->task_completion_status = dbuf->db_xfer_status;
5072                 free_it = 1;
5073         }
5074         /*
5075          * If the task is known to the LU then queue it. But if
5076          * it is already queued (multiple completions) then
5077          * just update the buffer information while holding the
5078          * worker lock. If the task is not known to the LU
5079          * (completed/aborted), then see if we need to
5080          * free this task.
5081          */
5082         if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
5083                 free_it = 0;
5084                 update_queue_flags = 1;
5085                 if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
5086                         queue_it = 0;
5087                 } else {
5088                         queue_it = 1;
5089                 }
5090         } else {
5091                 update_queue_flags = 0;
5092                 queue_it = 0;
5093         }
5094         itask->itask_flags = new;
5095 
5096         if (update_queue_flags) {
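                     /*
                      * A command-stack entry packs the dbuf handle above bit 4
                      * (the << 5) and the command code in the low bits, so both
                      * can be recovered later from a single byte.
                      */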
5097                 uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;
5098 
5099                 ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
5100                 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
5101 
5102                 itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
5103                 if (queue_it) {
5104                         STMF_ENQUEUE_ITASK(w, itask);
5105                 }
5106                 mutex_exit(&w->worker_lock);
5107                 mutex_exit(&itask->itask_mutex);
5108                 return;
5109         }
5110 
5111         mutex_exit(&w->worker_lock);
5112         if (free_it) {
5113                 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5114                     ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
5115                     ITASK_BEING_ABORTED)) == 0) {
5116                         stmf_task_free(task);
5117                         return;
5118                 }
5119         }
5120         mutex_exit(&itask->itask_mutex);
5121 }
5122 
5123 stmf_status_t
5124 stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
5125 {
5126         DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);
5127 
5128         stmf_i_scsi_task_t *itask =
5129             (stmf_i_scsi_task_t *)task->task_stmf_private;
5130 
5131         stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);
5132 
5133         mutex_enter(&itask->itask_mutex);
5134         if (ioflags & STMF_IOF_LU_DONE) {
5135                 if (itask->itask_flags & ITASK_BEING_ABORTED) {
5136                         mutex_exit(&itask->itask_mutex);
5137                         return (STMF_ABORTED);
5138                 }
5139                 itask->itask_flags &= ~ITASK_KNOWN_TO_LU;
5140         }
5141 
5142         if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
5143                 mutex_exit(&itask->itask_mutex);
5144                 return (STMF_SUCCESS);
5145         }
5146 
5147         if (itask->itask_flags & ITASK_BEING_ABORTED) {
5148                 mutex_exit(&itask->itask_mutex);
5149                 return (STMF_ABORTED);
5150         }
5151         mutex_exit(&itask->itask_mutex);
5152 
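             /*
              * Compute the SCSI residual: an over-run when the command
              * transfer length exceeds what the initiator expected, an
              * under-run when fewer bytes were moved than expected.
              */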
5153         if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
5154                 task->task_status_ctrl = 0;
5155                 task->task_resid = 0;
5156         } else if (task->task_cmd_xfer_length >
5157             task->task_expected_xfer_length) {
5158                 task->task_status_ctrl = TASK_SCTRL_OVER;
5159                 task->task_resid = task->task_cmd_xfer_length -
5160                     task->task_expected_xfer_length;
5161         } else if (task->task_nbytes_transferred <
5162             task->task_expected_xfer_length) {
5163                 task->task_status_ctrl = TASK_SCTRL_UNDER;
5164                 task->task_resid = task->task_expected_xfer_length -
5165                     task->task_nbytes_transferred;
5166         } else {
5167                 task->task_status_ctrl = 0;
5168                 task->task_resid = 0;
5169         }
5170         return (task->task_lport->lport_send_status(task, ioflags));
5171 }
5172 
5173 void
5174 stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5175 {
5176         stmf_i_scsi_task_t *itask =
5177             (stmf_i_scsi_task_t *)task->task_stmf_private;
5178         stmf_worker_t *w = itask->itask_worker;
5179         uint32_t new;
5180         uint8_t free_it, queue_it;
5181 
5182         stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);
5183 
5184         mutex_enter(&itask->itask_mutex);
5185         mutex_enter(&w->worker_lock);
5186         new = itask->itask_flags;
5187         if (itask->itask_flags & ITASK_BEING_ABORTED) {
5188                 mutex_exit(&w->worker_lock);
5189                 mutex_exit(&itask->itask_mutex);
5190                 return;
5191         }
5192         free_it = 0;
5193         if (iof & STMF_IOF_LPORT_DONE) {
5194                 new &= ~ITASK_KNOWN_TO_TGT_PORT;
5195                 free_it = 1;
5196         }
5197         /*
5198          * If the task is known to the LU then queue it. But if
5199          * it is already queued (multiple completions) then
5200          * just update the buffer information while holding the
5201          * worker lock. If the task is not known to the LU
5202          * (completed/aborted), then see if we need to
5203          * free this task.
5204          */
5205         if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
5206                 free_it = 0;
5207                 queue_it = 1;
5208                 if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
5209                         cmn_err(CE_PANIC, "status completion received"
5210                             " when task is already in worker queue,"
5211                             " task = %p", (void *)task);
5212                 }
5213         } else {
5214                 queue_it = 0;
5215         }
5216         itask->itask_flags = new;
5217         task->task_completion_status = s;
5218 
5219         if (queue_it) {
5220                 ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
5221                 itask->itask_cmd_stack[itask->itask_ncmds++] =
5222                     ITASK_CMD_STATUS_DONE;
5223 
5224                 STMF_ENQUEUE_ITASK(w, itask);
5225                 mutex_exit(&w->worker_lock);
5226                 mutex_exit(&itask->itask_mutex);
5227                 return;
5228         }
5229 
5230         mutex_exit(&w->worker_lock);
5231 
5232         if (free_it) {
5233                 if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5234                     ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
5235                     ITASK_BEING_ABORTED)) == 0) {
5236                         stmf_task_free(task);
5237                         return;
5238                 } else {
5239                         cmn_err(CE_PANIC, "LU is done with the task but LPORT "
5240                             "is not done, itask %p itask_flags %x",
5241                             (void *)itask, itask->itask_flags);
5242                 }
5243         }
5244         mutex_exit(&itask->itask_mutex);
5245 }
5246 
5247 void
5248 stmf_task_lu_done(scsi_task_t *task)
5249 {
5250         stmf_i_scsi_task_t *itask =
5251             (stmf_i_scsi_task_t *)task->task_stmf_private;
5252         stmf_worker_t *w = itask->itask_worker;
5253 
5254         mutex_enter(&itask->itask_mutex);
5255         mutex_enter(&w->worker_lock);
5256         if (itask->itask_flags & ITASK_BEING_ABORTED) {
5257                 mutex_exit(&w->worker_lock);
5258                 mutex_exit(&itask->itask_mutex);
5259                 return;
5260         }
5261         if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
5262                 cmn_err(CE_PANIC, "task_lu_done received"
5263                     " when task is in worker queue,"
5264                     " task = %p", (void *)task);
5265         }
5266         itask->itask_flags &= ~ITASK_KNOWN_TO_LU;
5267 
5268         mutex_exit(&w->worker_lock);
5269         if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5270             ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
5271             ITASK_BEING_ABORTED)) == 0) {
5272                 stmf_task_free(task);
5273                 return;
5274         } else {
5275                 cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
5276                     "the task is still not done, task = %p", (void *)task);
5277         }
5278         mutex_exit(&itask->itask_mutex);
5279 }
5280 
5281 void
5282 stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
5283 {
5284         stmf_i_scsi_task_t *itask =
5285             (stmf_i_scsi_task_t *)task->task_stmf_private;
5286         stmf_worker_t *w;
5287 
5288         stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);
5289 
5290         mutex_enter(&itask->itask_mutex);
5291         if ((itask->itask_flags & ITASK_BEING_ABORTED) ||
5292             ((itask->itask_flags & (ITASK_KNOWN_TO_TGT_PORT |
5293             ITASK_KNOWN_TO_LU)) == 0)) {
5294                 mutex_exit(&itask->itask_mutex);
5295                 return;
5296         }
5297         itask->itask_flags |= ITASK_BEING_ABORTED;
5298         task->task_completion_status = s;
5299 
5300         if (((w = itask->itask_worker) == NULL) ||
5301             (itask->itask_flags & ITASK_IN_TRANSITION)) {
5302                 mutex_exit(&itask->itask_mutex);
5303                 return;
5304         }
5305 
5306         /* Queue it and get out */
5307         if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
5308                 mutex_exit(&itask->itask_mutex);
5309                 return;
5310         }
5311         mutex_enter(&w->worker_lock);
5312         STMF_ENQUEUE_ITASK(w, itask);
5313         mutex_exit(&w->worker_lock);
5314         mutex_exit(&itask->itask_mutex);
5315 }
5316 
5317 void
5318 stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
5319 {
5320         stmf_i_scsi_task_t *itask = NULL;
5321         uint32_t f, rf;
5322 
5323         DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
5324             stmf_status_t, s);
5325 
5326         switch (abort_cmd) {
5327         case STMF_QUEUE_ABORT_LU:
5328                 stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
5329                 return;
5330         case STMF_QUEUE_TASK_ABORT:
5331                 stmf_queue_task_for_abort(task, s);
5332                 return;
5333         case STMF_REQUEUE_TASK_ABORT_LPORT:
5334                 rf = ITASK_TGT_PORT_ABORT_CALLED;
5335                 f = ITASK_KNOWN_TO_TGT_PORT;
5336                 break;
5337         case STMF_REQUEUE_TASK_ABORT_LU:
5338                 rf = ITASK_LU_ABORT_CALLED;
5339                 f = ITASK_KNOWN_TO_LU;
5340                 break;
5341         default:
5342                 return;
5343         }
5344 
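             /*
              * Requeue of an abort: the task must already be marked as being
              * aborted with the matching KNOWN_TO_* and *_ABORT_CALLED flags
              * set; clearing the *_ABORT_CALLED flag below presumably lets
              * the abort be driven again.
              */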
5345         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
5346         mutex_enter(&itask->itask_mutex);
5347         f |= ITASK_BEING_ABORTED | rf;
5348 
5349         if ((itask->itask_flags & f) != f) {
5350                 mutex_exit(&itask->itask_mutex);
5351                 return;
5352         }
5353         itask->itask_flags &= ~rf;
5354         mutex_exit(&itask->itask_mutex);
5355 
5356 }
5357 
5358 /*
5359  * NOTE: stmf_abort_task_offline will release and then reacquire the
5360  * itask_mutex. This is required to prevent a lock order violation.
5361  */
5362 void
5363 stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5364 {
5365         char                     info[STMF_CHANGE_INFO_LEN];
5366         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5367         unsigned long long      st;
5368 
5369         stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);
5370         ASSERT(mutex_owned(&itask->itask_mutex));
5371         st = s; /* gcc fix */
5372         if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5373                 (void) snprintf(info, sizeof (info),
5374                     "task %p, lu failed to abort ret=%llx", (void *)task, st);
5375         } else if ((iof & STMF_IOF_LU_DONE) == 0) {
5376                 (void) snprintf(info, sizeof (info),
5377                     "Task aborted but LU is not finished, task ="
5378                     "%p, s=%llx, iof=%x", (void *)task, st, iof);
5379         } else {
5380                 /*
5381                  * LU aborted successfully
5382                  */
5383                 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
5384                 return;
5385         }
5386 
5387         stmf_abort_task_offline(task, 1, info);
5388 }
5389 
5390 /*
5391  * NOTE: stmf_abort_task_offline will release and then reacquire the
5392  * itask_mutex. This is required to prevent a lock order violation.
5393  */
5394 void
5395 stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
5396 {
5397         char                    info[STMF_CHANGE_INFO_LEN];
5398         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5399         unsigned long long      st;
5400 
5401         ASSERT(mutex_owned(&itask->itask_mutex));
5402         stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);
5403         st = s;
5404         if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
5405                 (void) snprintf(info, sizeof (info),
5406                     "task %p, tgt port failed to abort ret=%llx", (void *)task,
5407                     st);
5408         } else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
5409                 (void) snprintf(info, sizeof (info),
5410                     "Task aborted but tgt port is not finished, "
5411                     "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
5412         } else {
5413                 /*
5414                  * LPORT aborted successfully
5415                  */
5416                 atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_TGT_PORT);
5417                 return;
5418         }
5419 
5420         stmf_abort_task_offline(task, 0, info);
5421 }
5422 
5423 void
5424 stmf_task_lport_aborted_unlocked(scsi_task_t *task, stmf_status_t s,
5425     uint32_t iof)
5426 {
5427         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5428 
5429         mutex_enter(&itask->itask_mutex);
5430         stmf_task_lport_aborted(task, s, iof);
5431         mutex_exit(&itask->itask_mutex);
5432 }
5433 
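/*
 * Schedule an ITASK_CMD_POLL_LU command on the task's worker thread. The
 * timeout is in milliseconds (a minimum of one clock tick is enforced) and
 * ITASK_DEFAULT_POLL_TIMEOUT means "poll on the next tick". Returns
 * STMF_BUSY if the task's command stack is full, STMF_SUCCESS if the poll
 * was queued or one is already pending.
 */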
5434 stmf_status_t
5435 stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
5436 {
5437         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5438             task->task_stmf_private;
5439         stmf_worker_t *w = itask->itask_worker;
5440         int i;
5441 
5442         mutex_enter(&itask->itask_mutex);
5443         ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
5444         mutex_enter(&w->worker_lock);
5445         if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5446                 mutex_exit(&w->worker_lock);
5447                 mutex_exit(&itask->itask_mutex);
5448                 return (STMF_BUSY);
5449         }
5450         for (i = 0; i < itask->itask_ncmds; i++) {
5451                 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
5452                         mutex_exit(&w->worker_lock);
5453                         mutex_exit(&itask->itask_mutex);
5454                         return (STMF_SUCCESS);
5455                 }
5456         }
5457         itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
5458         if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5459                 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5460         } else {
5461                 clock_t t = drv_usectohz(timeout * 1000);
5462                 if (t == 0)
5463                         t = 1;
5464                 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5465         }
5466         if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5467                 STMF_ENQUEUE_ITASK(w, itask);
5468         }
5469         mutex_exit(&w->worker_lock);
5470         mutex_exit(&itask->itask_mutex);
5471         return (STMF_SUCCESS);
5472 }
5473 
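/*
 * Same as stmf_task_poll_lu() except that ITASK_CMD_POLL_LPORT is queued,
 * i.e. the target port side of the task is polled instead of the LU.
 */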
5474 stmf_status_t
5475 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5476 {
5477         stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5478             task->task_stmf_private;
5479         stmf_worker_t *w = itask->itask_worker;
5480         int i;
5481 
5482         mutex_enter(&itask->itask_mutex);
5483         ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5484         mutex_enter(&w->worker_lock);
5485         if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5486                 mutex_exit(&w->worker_lock);
5487                 mutex_exit(&itask->itask_mutex);
5488                 return (STMF_BUSY);
5489         }
5490         for (i = 0; i < itask->itask_ncmds; i++) {
5491                 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5492                         mutex_exit(&w->worker_lock);
5493                         mutex_exit(&itask->itask_mutex);
5494                         return (STMF_SUCCESS);
5495                 }
5496         }
5497         itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5498         if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5499                 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5500         } else {
5501                 clock_t t = drv_usectohz(timeout * 1000);
5502                 if (t == 0)
5503                         t = 1;
5504                 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5505         }
5506         if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5507                 STMF_ENQUEUE_ITASK(w, itask);
5508         }
5509         mutex_exit(&w->worker_lock);
5510         mutex_exit(&itask->itask_mutex);
5511         return (STMF_SUCCESS);
5512 }
5513 
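/*
 * Drive the abort of a single task. The LU abort entry point (or dlun0's
 * when default handling is in effect) and the lport abort entry point are
 * each called at most once, guarded by the ITASK_LU_ABORT_CALLED and
 * ITASK_TGT_PORT_ABORT_CALLED flags. STMF_BUSY clears the guard so the
 * abort is retried later; any other failure, or an abort that exceeds the
 * LU/lport abort timeout, escalates to stmf_abort_task_offline().
 */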
5514 void
5515 stmf_do_task_abort(scsi_task_t *task)
5516 {
5517         stmf_i_scsi_task_t      *itask = TASK_TO_ITASK(task);
5518         stmf_lu_t               *lu;
5519         stmf_local_port_t       *lport;
5520         unsigned long long       ret;
5521         uint32_t                 new = 0;
5522         uint8_t                  call_lu_abort, call_port_abort;
5523         char                     info[STMF_CHANGE_INFO_LEN];
5524 
5525         lu = task->task_lu;
5526         lport = task->task_lport;
5527         mutex_enter(&itask->itask_mutex);
5528         new = itask->itask_flags;
5529         if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
5530             ITASK_LU_ABORT_CALLED)) == ITASK_KNOWN_TO_LU) {
5531                 new |= ITASK_LU_ABORT_CALLED;
5532                 call_lu_abort = 1;
5533         } else {
5534                 call_lu_abort = 0;
5535         }
5536         itask->itask_flags = new;
5537 
5538         if (call_lu_abort) {
5539                 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
5540                         ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
5541                 } else {
5542                         ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
5543                 }
5544                 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
5545                         stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
5546                 } else if (ret == STMF_BUSY) {
5547                         atomic_and_32(&itask->itask_flags,
5548                             ~ITASK_LU_ABORT_CALLED);
5549                 } else if (ret != STMF_SUCCESS) {
5550                         (void) snprintf(info, sizeof (info),
5551                             "Abort failed by LU %p, ret %llx", (void *)lu, ret);
5552                         stmf_abort_task_offline(task, 1, info);
5553                 }
5554         } else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
5555                 if (ddi_get_lbolt() > (itask->itask_start_time +
5556                     STMF_SEC2TICK(lu->lu_abort_timeout?
5557                     lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
5558                         (void) snprintf(info, sizeof (info),
5559                             "lu abort timed out");
5560                         stmf_abort_task_offline(itask->itask_task, 1, info);
5561                 }
5562         }
5563 
5564         /*
5565          * NOTE: After the call to either stmf_abort_task_offline() or
5566          * stmf_task_lu_aborted() the itask_mutex was dropped and reacquired
5567          * to avoid a deadlock situation with stmf_state.stmf_lock.
5568          */
5569 
5570         new = itask->itask_flags;
5571         if ((itask->itask_flags & (ITASK_KNOWN_TO_TGT_PORT |
5572             ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
5573                 new |= ITASK_TGT_PORT_ABORT_CALLED;
5574                 call_port_abort = 1;
5575         } else {
5576                 call_port_abort = 0;
5577         }
5578         itask->itask_flags = new;
5579 
5580         if (call_port_abort) {
5581                 ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
5582                 if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
5583                         stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
5584                 } else if (ret == STMF_BUSY) {
5585                         atomic_and_32(&itask->itask_flags,
5586                             ~ITASK_TGT_PORT_ABORT_CALLED);
5587                 } else if (ret != STMF_SUCCESS) {
5588                         (void) snprintf(info, sizeof (info),
5589                             "Abort failed by tgt port %p ret %llx",
5590                             (void *)lport, ret);
5591                         stmf_abort_task_offline(task, 0, info);
5592                 }
5593         } else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
5594                 if (ddi_get_lbolt() > (itask->itask_start_time +
5595                     STMF_SEC2TICK(lport->lport_abort_timeout?
5596                     lport->lport_abort_timeout :
5597                     ITASK_DEFAULT_ABORT_TIMEOUT))) {
5598                         (void) snprintf(info, sizeof (info),
5599                             "lport abort timed out");
5600                         stmf_abort_task_offline(itask->itask_task, 0, info);
5601                 }
5602         }
5603         mutex_exit(&itask->itask_mutex);
5604 }
5605 
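/*
 * State change dispatcher for LUs and local ports. ONLINE and OFFLINE
 * requests validate the current state, move the object to the matching
 * ONLINING/OFFLINING state and hand the work to the service thread via
 * stmf_svc_queue(). The *_COMPLETE commands finish the transition based on
 * the completion status supplied by the provider and acknowledge it
 * through the provider's ctl entry point.
 */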
5606 stmf_status_t
5607 stmf_ctl(int cmd, void *obj, void *arg)
5608 {
5609         stmf_status_t                   ret;
5610         stmf_i_lu_t                     *ilu;
5611         stmf_i_local_port_t             *ilport;
5612         stmf_state_change_info_t        *ssci = (stmf_state_change_info_t *)arg;
5613 
5614         mutex_enter(&stmf_state.stmf_lock);
5615         ret = STMF_INVALID_ARG;
5616         if (cmd & STMF_CMD_LU_OP) {
5617                 ilu = stmf_lookup_lu((stmf_lu_t *)obj);
5618                 if (ilu == NULL) {
5619                         goto stmf_ctl_lock_exit;
5620                 }
5621                 DTRACE_PROBE3(lu__state__change,
5622                     stmf_lu_t *, ilu->ilu_lu,
5623                     int, cmd, stmf_state_change_info_t *, ssci);
5624         } else if (cmd & STMF_CMD_LPORT_OP) {
5625                 ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
5626                 if (ilport == NULL) {
5627                         goto stmf_ctl_lock_exit;
5628                 }
5629                 DTRACE_PROBE3(lport__state__change,
5630                     stmf_local_port_t *, ilport->ilport_lport,
5631                     int, cmd, stmf_state_change_info_t *, ssci);
5632         } else {
5633                 goto stmf_ctl_lock_exit;
5634         }
5635 
5636         switch (cmd) {
5637         case STMF_CMD_LU_ONLINE:
5638                 switch (ilu->ilu_state) {
5639                         case STMF_STATE_OFFLINE:
5640                                 ret = STMF_SUCCESS;
5641                                 break;
5642                         case STMF_STATE_ONLINE:
5643                         case STMF_STATE_ONLINING:
5644                                 ret = STMF_ALREADY;
5645                                 break;
5646                         case STMF_STATE_OFFLINING:
5647                                 ret = STMF_BUSY;
5648                                 break;
5649                         default:
5650                                 ret = STMF_BADSTATE;
5651                                 break;
5652                 }
5653                 if (ret != STMF_SUCCESS)
5654                         goto stmf_ctl_lock_exit;
5655 
5656                 ilu->ilu_state = STMF_STATE_ONLINING;
5657                 mutex_exit(&stmf_state.stmf_lock);
5658                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5659                 break;
5660 
5661         case STMF_CMD_LU_ONLINE_COMPLETE:
5662                 if (ilu->ilu_state != STMF_STATE_ONLINING) {
5663                         ret = STMF_BADSTATE;
5664                         goto stmf_ctl_lock_exit;
5665                 }
5666                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5667                     STMF_SUCCESS) {
5668                         ilu->ilu_state = STMF_STATE_ONLINE;
5669                         mutex_exit(&stmf_state.stmf_lock);
5670                         ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
5671                             STMF_ACK_LU_ONLINE_COMPLETE, arg);
5672                         mutex_enter(&stmf_state.stmf_lock);
5673                         stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
5674                 } else {
5675                         /* XXX: should log a message and record more data */
5676                         ilu->ilu_state = STMF_STATE_OFFLINE;
5677                 }
5678                 ret = STMF_SUCCESS;
5679                 goto stmf_ctl_lock_exit;
5680 
5681         case STMF_CMD_LU_OFFLINE:
5682                 switch (ilu->ilu_state) {
5683                         case STMF_STATE_ONLINE:
5684                                 ret = STMF_SUCCESS;
5685                                 break;
5686                         case STMF_STATE_OFFLINE:
5687                         case STMF_STATE_OFFLINING:
5688                                 ret = STMF_ALREADY;
5689                                 break;
5690                         case STMF_STATE_ONLINING:
5691                                 ret = STMF_BUSY;
5692                                 break;
5693                         default:
5694                                 ret = STMF_BADSTATE;
5695                                 break;
5696                 }
5697                 if (ret != STMF_SUCCESS)
5698                         goto stmf_ctl_lock_exit;
5699                 ilu->ilu_state = STMF_STATE_OFFLINING;
5700                 mutex_exit(&stmf_state.stmf_lock);
5701                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5702                 break;
5703 
5704         case STMF_CMD_LU_OFFLINE_COMPLETE:
5705                 if (ilu->ilu_state != STMF_STATE_OFFLINING) {
5706                         ret = STMF_BADSTATE;
5707                         goto stmf_ctl_lock_exit;
5708                 }
5709                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5710                     STMF_SUCCESS) {
5711                         ilu->ilu_state = STMF_STATE_OFFLINE;
5712                         mutex_exit(&stmf_state.stmf_lock);
5713                         ((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
5714                             STMF_ACK_LU_OFFLINE_COMPLETE, arg);
5715                         mutex_enter(&stmf_state.stmf_lock);
5716                 } else {
5717                         ilu->ilu_state = STMF_STATE_ONLINE;
5718                         stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
5719                 }
5720                 mutex_exit(&stmf_state.stmf_lock);
5721                 break;
5722 
5723         /*
5724          * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
5725          * It's related to hardware disable/enable.
5726          */
5727         case STMF_CMD_LPORT_ONLINE:
5728                 switch (ilport->ilport_state) {
5729                         case STMF_STATE_OFFLINE:
5730                                 ret = STMF_SUCCESS;
5731                                 break;
5732                         case STMF_STATE_ONLINE:
5733                         case STMF_STATE_ONLINING:
5734                                 ret = STMF_ALREADY;
5735                                 break;
5736                         case STMF_STATE_OFFLINING:
5737                                 ret = STMF_BUSY;
5738                                 break;
5739                         default:
5740                                 ret = STMF_BADSTATE;
5741                                 break;
5742                 }
5743                 if (ret != STMF_SUCCESS)
5744                         goto stmf_ctl_lock_exit;
5745 
5746                 /*
5747                  * Only user request can recover the port from the
5748                  * FORCED_OFFLINE state
5749                  */
5750                 if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
5751                         if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
5752                                 ret = STMF_FAILURE;
5753                                 goto stmf_ctl_lock_exit;
5754                         }
5755                 }
5756 
5757                 /*
5758                  * Avoid too frequent request to online
5759                  */
5760                 if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
5761                         ilport->ilport_online_times = 0;
5762                         ilport->ilport_avg_interval = 0;
5763                 }
5764                 if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
5765                     (ilport->ilport_online_times >= 4)) {
5766                         ret = STMF_FAILURE;
5767                         ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
5768                         stmf_trace(NULL, "stmf_ctl: too frequent request to "
5769                             "online the port");
5770                         cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
5771                             "online the port, set FORCED_OFFLINE now");
5772                         goto stmf_ctl_lock_exit;
5773                 }
5774                 if (ilport->ilport_online_times > 0) {
5775                         if (ilport->ilport_online_times == 1) {
5776                                 ilport->ilport_avg_interval = ddi_get_lbolt() -
5777                                     ilport->ilport_last_online_clock;
5778                         } else {
5779                                 ilport->ilport_avg_interval =
5780                                     (ilport->ilport_avg_interval +
5781                                     ddi_get_lbolt() -
5782                                     ilport->ilport_last_online_clock) >> 1;
5783                         }
5784                 }
5785                 ilport->ilport_last_online_clock = ddi_get_lbolt();
5786                 ilport->ilport_online_times++;
5787 
5788                 /*
5789                  * Submit online service request
5790                  */
5791                 ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
5792                 ilport->ilport_state = STMF_STATE_ONLINING;
5793                 mutex_exit(&stmf_state.stmf_lock);
5794                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5795                 break;
5796 
5797         case STMF_CMD_LPORT_ONLINE_COMPLETE:
5798                 if (ilport->ilport_state != STMF_STATE_ONLINING) {
5799                         ret = STMF_BADSTATE;
5800                         goto stmf_ctl_lock_exit;
5801                 }
5802                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5803                     STMF_SUCCESS) {
5804                         ilport->ilport_state = STMF_STATE_ONLINE;
5805                         mutex_exit(&stmf_state.stmf_lock);
5806                         ((stmf_local_port_t *)obj)->lport_ctl(
5807                             (stmf_local_port_t *)obj,
5808                             STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
5809                         mutex_enter(&stmf_state.stmf_lock);
5810                 } else {
5811                         ilport->ilport_state = STMF_STATE_OFFLINE;
5812                 }
5813                 ret = STMF_SUCCESS;
5814                 goto stmf_ctl_lock_exit;
5815 
5816         case STMF_CMD_LPORT_OFFLINE:
5817                 switch (ilport->ilport_state) {
5818                         case STMF_STATE_ONLINE:
5819                                 ret = STMF_SUCCESS;
5820                                 break;
5821                         case STMF_STATE_OFFLINE:
5822                         case STMF_STATE_OFFLINING:
5823                                 ret = STMF_ALREADY;
5824                                 break;
5825                         case STMF_STATE_ONLINING:
5826                                 ret = STMF_BUSY;
5827                                 break;
5828                         default:
5829                                 ret = STMF_BADSTATE;
5830                                 break;
5831                 }
5832                 if (ret != STMF_SUCCESS)
5833                         goto stmf_ctl_lock_exit;
5834 
5835                 ilport->ilport_state = STMF_STATE_OFFLINING;
5836                 mutex_exit(&stmf_state.stmf_lock);
5837                 stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
5838                 break;
5839 
5840         case STMF_CMD_LPORT_OFFLINE_COMPLETE:
5841                 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
5842                         ret = STMF_BADSTATE;
5843                         goto stmf_ctl_lock_exit;
5844                 }
5845                 if (((stmf_change_status_t *)arg)->st_completion_status ==
5846                     STMF_SUCCESS) {
5847                         ilport->ilport_state = STMF_STATE_OFFLINE;
5848                         mutex_exit(&stmf_state.stmf_lock);
5849                         ((stmf_local_port_t *)obj)->lport_ctl(
5850                             (stmf_local_port_t *)obj,
5851                             STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
5852                         mutex_enter(&stmf_state.stmf_lock);
5853                 } else {
5854                         ilport->ilport_state = STMF_STATE_ONLINE;
5855                 }
5856                 mutex_exit(&stmf_state.stmf_lock);
5857                 break;
5858 
5859         default:
5860                 cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
5861                 ret = STMF_INVALID_ARG;
5862                 goto stmf_ctl_lock_exit;
5863         }
5864 
5865         return (STMF_SUCCESS);
5866 
5867 stmf_ctl_lock_exit:;
5868         mutex_exit(&stmf_state.stmf_lock);
5869         return (ret);
5870 }
5871 
5872 /* ARGSUSED */
5873 stmf_status_t
5874 stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5875     uint32_t *bufsizep)
5876 {
5877         return (STMF_NOT_SUPPORTED);
5878 }
5879 
5880 /* ARGSUSED */
5881 stmf_status_t
5882 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5883     uint32_t *bufsizep)
5884 {
5885         uint32_t cl = SI_GET_CLASS(cmd);
5886 
5887         if (cl == SI_STMF) {
5888                 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5889         }
5890         if (cl == SI_LPORT) {
5891                 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5892                     arg2, buf, bufsizep));
5893         } else if (cl == SI_LU) {
5894                 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5895                     bufsizep));
5896         }
5897 
5898         return (STMF_NOT_SUPPORTED);
5899 }
5900 
5901 /*
5902  * Used by port providers. wwn is an 8-byte WWN, sdid is the devid used by
5903  * stmf to register local ports. The ident must have 20 bytes of buffer
5904  * space to hold the wwn converted to a "wwn.xxxxxxxxxxxxxxxx" string.
5905  */
5906 void
5907 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5908     uint8_t protocol_id)
5909 {
5910         char wwn_str[20+1];
5911 
5912         sdid->protocol_id = protocol_id;
5913         sdid->piv = 1;
5914         sdid->code_set = CODE_SET_ASCII;
5915         sdid->association = ID_IS_TARGET_PORT;
5916         sdid->ident_length = 20;
5917         /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5918         (void) snprintf(wwn_str, sizeof (wwn_str),
5919             "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5920             wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5921         bcopy(wwn_str, (char *)sdid->ident, 20);
5922 }
5923 
5924 
5925 stmf_xfer_data_t *
5926 stmf_prepare_tpgs_data(uint8_t ilu_alua)
5927 {
5928         stmf_xfer_data_t *xd;
5929         stmf_i_local_port_t *ilport;
5930         uint8_t *p;
5931         uint32_t sz, asz, nports = 0, nports_standby = 0;
5932 
5933         mutex_enter(&stmf_state.stmf_lock);
5934         /* check if any ports are standby and create second group */
5935         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
5936             ilport = ilport->ilport_next) {
5937                 if (ilport->ilport_standby == 1) {
5938                         nports_standby++;
5939                 } else {
5940                         nports++;
5941                 }
5942         }
5943 
5944         /*
5945          * Section 6.25 REPORT TARGET PORT GROUPS
5946          * The reply can contain many group replies. Each group is limited
5947          * to 255 port identifiers so we'll need to limit the amount of
5948          * data returned. For FC ports there's a physical limitation in
5949          * machines that makes reaching 255 ports very unlikely. For
5950          * iSCSI, on the other hand, recent changes mean the port count could
5951          * be as high as 4096 (the current limit). Limiting the data returned
5952          * for iSCSI isn't as bad as it sounds. This information is only
5953          * important for ALUA, which isn't supported for iSCSI. iSCSI uses
5954          * virtual IP addresses to deal with node fail over in a cluster.
5955          */
5956         nports = min(nports, 255);
5957         nports_standby = min(nports_standby, 255);
5958 
5959         /*
5960          * The first 4 bytes of the returned data hold the length and the
5961          * Target Port Group header is 8 bytes; that's where the 12 comes
5962          * from. Each port entry is 4 bytes in size.
5963          */
5964         sz = (nports * 4) + 12;
5965         if (nports_standby != 0 && ilu_alua != 0) {
5966                 /* --- Only add 8 bytes since it's just the Group header ----*/
5967                 sz += (nports_standby * 4) + 8;
5968         }
5969 
5970         /*
5971          * The stmf_xfer_data structure contains 4 bytes that will be
5972          * part of the data buffer. So, subtract the 4 bytes from the space
5973          * needed.
5974          */
5975         asz = sizeof (*xd) + sz - 4;
5976         xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
5977         if (xd == NULL) {
5978                 mutex_exit(&stmf_state.stmf_lock);
5979                 return (NULL);
5980         }
5981         xd->alloc_size = asz;
5982         xd->size_left = sz;
5983 
5984         p = xd->buf;
5985 
5986         /* ---- length values never include the field that holds the size ----*/
5987         *((uint32_t *)p) = BE_32(sz - 4);
5988         p += 4;
5989 
5990         /* ---- Now fill out the first Target Group header ---- */
5991         p[0] = 0x80;    /* PREF */
5992         p[1] = 5;       /* AO_SUP, S_SUP */
5993         if (stmf_state.stmf_alua_node == 1) {
5994                 p[3] = 1;       /* Group 1 */
5995         } else {
5996                 p[3] = 0;       /* Group 0 */
5997         }
5998         p[7] = nports & 0xff;
5999         p += 8;
6000         for (ilport = stmf_state.stmf_ilportlist; ilport != NULL && nports != 0;
6001             ilport = ilport->ilport_next) {
6002                 if (ilport->ilport_standby == 1) {
6003                         continue;
6004                 }
6005                 ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6006                 p += 4;
6007                 nports--;
6008         }
6009         if (nports_standby != 0 && ilu_alua != 0) {
6010                 p[0] = 0x02;    /* Non PREF, Standby */
6011                 p[1] = 5;       /* AO_SUP, S_SUP */
6012                 if (stmf_state.stmf_alua_node == 1) {
6013                         p[3] = 0;       /* Group 0 */
6014                 } else {
6015                         p[3] = 1;       /* Group 1 */
6016                 }
6017                 p[7] = nports_standby & 0xff;
6018                 p += 8;
6019                 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL &&
6020                     nports_standby != 0; ilport = ilport->ilport_next) {
6021                         if (ilport->ilport_standby == 0) {
6022                                 continue;
6023                         }
6024                         ((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
6025                         p += 4;
6026                         nports_standby--;
6027                 }
6028         }
6029 
6030         mutex_exit(&stmf_state.stmf_lock);
6031 
6032         return (xd);
6033 }
6034 
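/*
 * Return a kmem_zalloc'd copy of the scsi_devid_desc of the local port
 * whose relative target port id matches rtpid, or NULL if no such port
 * exists or the allocation fails. The caller is responsible for freeing
 * the returned descriptor.
 */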
6035 struct scsi_devid_desc *
6036 stmf_scsilib_get_devid_desc(uint16_t rtpid)
6037 {
6038         scsi_devid_desc_t *devid = NULL;
6039         stmf_i_local_port_t *ilport;
6040 
6041         mutex_enter(&stmf_state.stmf_lock);
6042 
6043         for (ilport = stmf_state.stmf_ilportlist; ilport;
6044             ilport = ilport->ilport_next) {
6045                 if (ilport->ilport_rtpid == rtpid) {
6046                         scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
6047                         uint32_t id_sz = sizeof (scsi_devid_desc_t) +
6048                             id->ident_length;
6049                         devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
6050                             KM_NOSLEEP);
6051                         if (devid != NULL) {
6052                                 bcopy(id, devid, id_sz);
6053                         }
6054                         break;
6055                 }
6056         }
6057 
6058         mutex_exit(&stmf_state.stmf_lock);
6059         return (devid);
6060 }
6061 
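/*
 * Reverse lookup of the above: given a device identifier, return the
 * relative target port id of the matching local port, or 0 if no local
 * port carries that identifier.
 */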
6062 uint16_t
6063 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
6064 {
6065         stmf_i_local_port_t     *ilport;
6066         scsi_devid_desc_t       *id;
6067         uint16_t                rtpid = 0;
6068 
6069         mutex_enter(&stmf_state.stmf_lock);
6070         for (ilport = stmf_state.stmf_ilportlist; ilport;
6071             ilport = ilport->ilport_next) {
6072                 id = ilport->ilport_lport->lport_id;
6073                 if ((devid->ident_length == id->ident_length) &&
6074                     (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
6075                         rtpid = ilport->ilport_rtpid;
6076                         break;
6077                 }
6078         }
6079         mutex_exit(&stmf_state.stmf_lock);
6080         return (rtpid);
6081 }
6082 
6083 static uint16_t stmf_lu_id_gen_number = 0;
6084 
6085 stmf_status_t
6086 stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
6087 {
6088         return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
6089 }
6090 
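/*
 * Fill in lu_id (ident_length must already be 0x10) with a unique 16-byte
 * identifier built from the company id, the node's MAC address (or host id
 * when no MAC address is available), the current time and a generation
 * counter.
 */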
6091 stmf_status_t
6092 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
6093     scsi_devid_desc_t *lu_id)
6094 {
6095         uint8_t *p;
6096         struct timeval32 timestamp32;
6097         uint32_t *t = (uint32_t *)&timestamp32;
6098         struct ether_addr mac;
6099         uint8_t *e = (uint8_t *)&mac;
6100         int hid = (int)host_id;
6101         uint16_t gen_number;
6102 
6103         if (company_id == COMPANY_ID_NONE)
6104                 company_id = COMPANY_ID_SUN;
6105 
6106         if (lu_id->ident_length != 0x10)
6107                 return (STMF_INVALID_ARG);
6108 
6109         p = (uint8_t *)lu_id;
6110 
6111         gen_number = atomic_inc_16_nv(&stmf_lu_id_gen_number);
6112 
6113         p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
6114         p[4] = ((company_id >> 20) & 0xf) | 0x60;
6115         p[5] = (company_id >> 12) & 0xff;
6116         p[6] = (company_id >> 4) & 0xff;
6117         p[7] = (company_id << 4) & 0xf0;
6118         if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
6119                 hid = BE_32((int)zone_get_hostid(NULL));
6120         }
6121         if (hid != 0) {
6122                 e[0] = (hid >> 24) & 0xff;
6123                 e[1] = (hid >> 16) & 0xff;
6124                 e[2] = (hid >> 8) & 0xff;
6125                 e[3] = hid & 0xff;
6126                 e[4] = e[5] = 0;
6127         }
6128         bcopy(e, p+8, 6);
6129         uniqtime32(&timestamp32);
6130         *t = BE_32(*t);
6131         bcopy(t, p+14, 4);
6132         p[18] = (gen_number >> 8) & 0xff;
6133         p[19] = gen_number & 0xff;
6134 
6135         return (STMF_SUCCESS);
6136 }
6137 
6138 /*
6139  * saa packs the sense key, ASC and ASCQ as 0x00KKAAQQ; st is the SCSI status
6140  */
6141 void
6142 stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
6143 {
6144         uint8_t sd[18];
6145         task->task_scsi_status = st;
6146         if (st == 2) {
6147                 bzero(sd, 18);
6148                 sd[0] = 0x70;
6149                 sd[2] = (saa >> 16) & 0xf;
6150                 sd[7] = 10;
6151                 sd[12] = (saa >> 8) & 0xff;
6152                 sd[13] = saa & 0xff;
6153                 task->task_sense_data = sd;
6154                 task->task_sense_length = 18;
6155         } else {
6156                 task->task_sense_data = NULL;
6157                 task->task_sense_length = 0;
6158         }
6159         (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
6160 }
6161 
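/*
 * Build an INQUIRY VPD page 0x83 (device identification) in 'page'. The
 * descriptors selected by vpd_mask (LU id, target port id, target port
 * group, relative target port id) are appended until page_len is reached.
 * The page length field reflects the full descriptor length even when the
 * data is truncated; the return value is the number of bytes actually
 * written to 'page'.
 */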
6162 uint32_t
6163 stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
6164     uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
6165 {
6166         uint8_t         *p = NULL;
6167         uint8_t         small_buf[32];
6168         uint32_t        sz = 0;
6169         uint32_t        n = 4;
6170         uint32_t        m = 0;
6171         uint32_t        last_bit = 0;
6172 
6173         if (page_len < 4)
6174                 return (0);
6175         if (page_len > 65535)
6176                 page_len = 65535;
6177 
6178         page[0] = byte0;
6179         page[1] = 0x83;
6180 
6181         /* CONSTCOND */
6182         while (1) {
6183                 m += sz;
6184                 if (sz && (page_len > n)) {
6185                         uint32_t copysz;
6186                         copysz = page_len > (n + sz) ? sz : page_len - n;
6187                         bcopy(p, page + n, copysz);
6188                         n += copysz;
6189                 }
6190                 vpd_mask &= ~last_bit;
6191                 if (vpd_mask == 0)
6192                         break;
6193 
6194                 if (vpd_mask & STMF_VPD_LU_ID) {
6195                         last_bit = STMF_VPD_LU_ID;
6196                         sz = task->task_lu->lu_id->ident_length + 4;
6197                         p = (uint8_t *)task->task_lu->lu_id;
6198                         continue;
6199                 } else if (vpd_mask & STMF_VPD_TARGET_ID) {
6200                         last_bit = STMF_VPD_TARGET_ID;
6201                         sz = task->task_lport->lport_id->ident_length + 4;
6202                         p = (uint8_t *)task->task_lport->lport_id;
6203                         continue;
6204                 } else if (vpd_mask & STMF_VPD_TP_GROUP) {
6205                         stmf_i_local_port_t *ilport;
6206                         last_bit = STMF_VPD_TP_GROUP;
6207                         p = small_buf;
6208                         bzero(p, 8);
6209                         p[0] = 1;
6210                         p[1] = 0x15;
6211                         p[3] = 4;
6212                         ilport = (stmf_i_local_port_t *)
6213                             task->task_lport->lport_stmf_private;
6214                         /*
6215                          * If we're in alua mode, group 1 contains all alua
6216                          * participating ports and all standby ports with a
6217                          * relative target port id above 255. In addition, local
6218                          * (non standby/pppt) ports are in group 1 when the
6219                          * alua node is 1. Otherwise the group is 0.
6220                          */
6221                         if ((stmf_state.stmf_alua_state &&
6222                             (ilport->ilport_alua || ilport->ilport_standby) &&
6223                             ilport->ilport_rtpid > 255) ||
6224                             (stmf_state.stmf_alua_node == 1 &&
6225                             ilport->ilport_standby != 1)) {
6226                                 p[7] = 1;       /* Group 1 */
6227                         }
6228                         sz = 8;
6229                         continue;
6230                 } else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
6231                         stmf_i_local_port_t *ilport;
6232 
6233                         last_bit = STMF_VPD_RELATIVE_TP_ID;
6234                         p = small_buf;
6235                         bzero(p, 8);
6236                         p[0] = 1;
6237                         p[1] = 0x14;
6238                         p[3] = 4;
6239                         ilport = (stmf_i_local_port_t *)
6240                             task->task_lport->lport_stmf_private;
6241                         p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
6242                         p[7] = ilport->ilport_rtpid & 0xff;
6243                         sz = 8;
6244                         continue;
6245                 } else {
6246                         cmn_err(CE_WARN, "Invalid vpd_mask");
6247                         break;
6248                 }
6249         }
6250 
6251         page[2] = (m >> 8) & 0xff;
6252         page[3] = m & 0xff;
6253 
6254         return (n);
6255 }
6256 
6257 void
6258 stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
6259 {
6260         stmf_i_scsi_task_t *itask =
6261             (stmf_i_scsi_task_t *)task->task_stmf_private;
6262         stmf_i_lu_t *ilu =
6263             (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6264         stmf_xfer_data_t *xd;
6265         uint32_t sz, minsz;
6266 
6267         mutex_enter(&itask->itask_mutex);
6268         itask->itask_flags |= ITASK_DEFAULT_HANDLING;
6269 
6270         task->task_cmd_xfer_length =
6271             ((((uint32_t)task->task_cdb[6]) << 24) |
6272             (((uint32_t)task->task_cdb[7]) << 16) |
6273             (((uint32_t)task->task_cdb[8]) << 8) |
6274             ((uint32_t)task->task_cdb[9]));
6275 
6276         if (task->task_additional_flags &
6277             TASK_AF_NO_EXPECTED_XFER_LENGTH) {
6278                 task->task_expected_xfer_length =
6279                     task->task_cmd_xfer_length;
6280         }
6281         mutex_exit(&itask->itask_mutex);
6282 
6283         if (task->task_cmd_xfer_length == 0) {
6284                 stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6285                 return;
6286         }
6287         if (task->task_cmd_xfer_length < 4) {
6288                 stmf_scsilib_send_status(task, STATUS_CHECK,
6289                     STMF_SAA_INVALID_FIELD_IN_CDB);
6290                 return;
6291         }
6292 
6293         sz = min(task->task_expected_xfer_length,
6294             task->task_cmd_xfer_length);
6295 
6296         xd = stmf_prepare_tpgs_data(ilu->ilu_alua);
6297 
6298         if (xd == NULL) {
6299                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6300                     STMF_ALLOC_FAILURE, NULL);
6301                 return;
6302         }
6303 
6304         sz = min(sz, xd->size_left);
6305         xd->size_left = sz;
6306         minsz = min(512, sz);
6307 
6308         if (dbuf == NULL)
6309                 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
6310         if (dbuf == NULL) {
6311                 kmem_free(xd, xd->alloc_size);
6312                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6313                     STMF_ALLOC_FAILURE, NULL);
6314                 return;
6315         }
6316         dbuf->db_lu_private = xd;
6317         stmf_xd_to_dbuf(dbuf, 1);
6318 
6319         dbuf->db_flags = DB_DIRECTION_TO_RPORT;
6320         (void) stmf_xfer_data(task, dbuf, 0);
6321 
6322 }
6323 
6324 void
6325 stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
6326 {
6327 
6328         switch (task->task_mgmt_function) {
6329         /*
6330          * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
6331          * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
6332          * in these cases. This needs to be changed to abort only the required
6333          * set.
6334          */
6335         case TM_ABORT_TASK:
6336         case TM_ABORT_TASK_SET:
6337         case TM_CLEAR_TASK_SET:
6338         case TM_LUN_RESET:
6339                 stmf_handle_lun_reset(task);
6340                 /* issue the reset to the proxy node as well */
6341                 if (stmf_state.stmf_alua_state == 1) {
6342                         (void) stmf_proxy_scsi_cmd(task, NULL);
6343                 }
6344                 return;
6345         case TM_TARGET_RESET:
6346         case TM_TARGET_COLD_RESET:
6347         case TM_TARGET_WARM_RESET:
6348                 stmf_handle_target_reset(task);
6349                 return;
6350         default:
6351                 /* We don't support this task mgmt function */
6352                 stmf_scsilib_send_status(task, STATUS_CHECK,
6353                     STMF_SAA_INVALID_FIELD_IN_CMD_IU);
6354                 return;
6355         }
6356 }
6357 
6358 void
6359 stmf_handle_lun_reset(scsi_task_t *task)
6360 {
6361         stmf_i_scsi_task_t *itask;
6362         stmf_i_lu_t *ilu;
6363 
6364         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
6365         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6366 
6367         /*
6368          * To sync with target reset, grab this lock. The LU is not going
6369          * anywhere as there is at least one task pending (this task).
6370          */
6371         mutex_enter(&stmf_state.stmf_lock);
6372 
6373         if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
6374                 mutex_exit(&stmf_state.stmf_lock);
6375                 stmf_scsilib_send_status(task, STATUS_CHECK,
6376                     STMF_SAA_OPERATION_IN_PROGRESS);
6377                 return;
6378         }
6379         atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
6380         mutex_exit(&stmf_state.stmf_lock);
6381 
6382         /*
6383          * Mark this task as the one causing LU reset so that we know who
6384          * was responsible for setting the ILU_RESET_ACTIVE. In case this
6385          * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
6386          */
6387         mutex_enter(&itask->itask_mutex);
6388         itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;
6389         mutex_exit(&itask->itask_mutex);
6390 
6391         /* Initiate abort on all commands on this LU except this one */
6392         stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);
6393 
6394         /* Start polling on this task */
6395         if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
6396             != STMF_SUCCESS) {
6397                 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
6398                     NULL);
6399                 return;
6400         }
6401 }
6402 
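/*
 * Target (warm/cold) reset handling. The session and every LU mapped into
 * it are marked reset-active; if any of them already has a reset in
 * progress the command is failed with OPERATION IN PROGRESS. Otherwise all
 * commands on every mapped LU are aborted and this task polls the LU for
 * completion of the reset.
 */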
6403 void
6404 stmf_handle_target_reset(scsi_task_t *task)
6405 {
6406         stmf_i_scsi_task_t *itask;
6407         stmf_i_lu_t *ilu;
6408         stmf_i_scsi_session_t *iss;
6409         stmf_lun_map_t *lm;
6410         stmf_lun_map_ent_t *lm_ent;
6411         int i, lf;
6412 
6413         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
6414         iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
6415         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
6416 
6417         /*
6418          * To sync with LUN reset, grab this lock. The session is not going
6419          * anywhere as there is at least one task pending (this task).
6420          */
6421         mutex_enter(&stmf_state.stmf_lock);
6422 
6423         /* Grab the session lock as a writer to prevent any changes in it */
6424         rw_enter(iss->iss_lockp, RW_WRITER);
6425 
6426         if (iss->iss_flags & ISS_RESET_ACTIVE) {
6427                 rw_exit(iss->iss_lockp);
6428                 mutex_exit(&stmf_state.stmf_lock);
6429                 stmf_scsilib_send_status(task, STATUS_CHECK,
6430                     STMF_SAA_OPERATION_IN_PROGRESS);
6431                 return;
6432         }
6433         atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);
6434 
6435         /*
6436          * Now go through each LUN in this session and make sure all of them
6437          * can be reset.
6438          */
6439         lm = iss->iss_sm;
6440         for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
6441                 if (lm->lm_plus[i] == NULL)
6442                         continue;
6443                 lf++;
6444                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
6445                 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
6446                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
6447                         atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
6448                         rw_exit(iss->iss_lockp);
6449                         mutex_exit(&stmf_state.stmf_lock);
6450                         stmf_scsilib_send_status(task, STATUS_CHECK,
6451                             STMF_SAA_OPERATION_IN_PROGRESS);
6452                         return;
6453                 }
6454         }
6455         if (lf == 0) {
6456                 /* No LUNs in this session */
6457                 atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
6458                 rw_exit(iss->iss_lockp);
6459                 mutex_exit(&stmf_state.stmf_lock);
6460                 stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6461                 return;
6462         }
6463 
6464         /* ok, start the damage */
6465         mutex_enter(&itask->itask_mutex);
6466         itask->itask_flags |= ITASK_DEFAULT_HANDLING |
6467             ITASK_CAUSING_TARGET_RESET;
6468         mutex_exit(&itask->itask_mutex);
6469         for (i = 0; i < lm->lm_nentries; i++) {
6470                 if (lm->lm_plus[i] == NULL)
6471                         continue;
6472                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
6473                 ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
6474                 atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
6475         }
6476 
6477         for (i = 0; i < lm->lm_nentries; i++) {
6478                 if (lm->lm_plus[i] == NULL)
6479                         continue;
6480                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
6481                 stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
6482                     lm_ent->ent_lu);
6483         }
6484 
6485         rw_exit(iss->iss_lockp);
6486         mutex_exit(&stmf_state.stmf_lock);
6487 
6488         /* Start polling on this task */
6489         if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
6490             != STMF_SUCCESS) {
6491                 stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
6492                     NULL);
6493                 return;
6494         }
6495 }
6496 
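/*
 * Called for a new command when the session's LUN inventory has changed.
 * INQUIRY is passed through untouched. Otherwise the inventory-changed
 * state is cleared; REPORT LUNS then proceeds normally while any other
 * command is completed here with REPORT LUNS DATA HAS CHANGED. Returns 1
 * if the task was completed by this routine, 0 if the caller should keep
 * processing it.
 */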
6497 int
6498 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
6499 {
6500         scsi_task_t *task = itask->itask_task;
6501         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
6502             task->task_session->ss_stmf_private;
6503 
6504         rw_enter(iss->iss_lockp, RW_WRITER);
6505         if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
6506             (task->task_cdb[0] == SCMD_INQUIRY)) {
6507                 rw_exit(iss->iss_lockp);
6508                 return (0);
6509         }
6510         atomic_and_32(&iss->iss_flags,
6511             ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
6512         rw_exit(iss->iss_lockp);
6513 
6514         if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
6515                 return (0);
6516         }
6517         stmf_scsilib_send_status(task, STATUS_CHECK,
6518             STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
6519         return (1);
6520 }
6521 
6522 void
6523 stmf_worker_init()
6524 {
6525         uint32_t i;
6526         stmf_worker_t *w;
6527 
6528         /* Make local copy of global tunables */
6529 
6530         /*
6531          * Allow workers to be scaled down to a very low number for cases
6532          * where the load is light.  If the number of threads gets below
6533          * 4, assume it is a mistake and force the threads back to a
6534          * reasonable number.  The low limit of 4 is simply legacy and
6535          * may be too low.
6536          */
6537         ASSERT(stmf_workers == NULL);
6538         if (stmf_nworkers < 4) {
6539                 stmf_nworkers = 64;
6540         }
6541 
6542         stmf_workers = (stmf_worker_t *)kmem_zalloc(
6543             sizeof (stmf_worker_t) * stmf_nworkers, KM_SLEEP);
6544         for (i = 0; i < stmf_nworkers; i++) {
6545                 stmf_worker_t *w = &stmf_workers[i];
6546                 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
6547                 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
6548         }
6549         stmf_workers_state = STMF_WORKERS_ENABLED;
6550 
6551         /* Check if we need to create more worker threads */
6552         if (stmf_nworkers_cur < stmf_nworkers - 1) {
6553                 for (i = stmf_nworkers_cur; i < stmf_nworkers; i++) {
6554                         w = &stmf_workers[i];
6555                         w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
6556                             (void *)&stmf_workers[i], 0, &p0, TS_RUN,
6557                             minclsyspri);
6558                         stmf_nworkers_accepting_cmds++;
6559                 }
6560                 return;
6561         }
6562 
6563         /* Let's wait for at least one worker to start */
6564         while (stmf_nworkers_cur == 0)
6565                 delay(drv_usectohz(20 * 1000));
6566 }
6567 
6568 stmf_status_t
6569 stmf_worker_fini()
6570 {
6571         int i;
6572         clock_t sb;
6573 
6574         if (stmf_workers_state == STMF_WORKERS_DISABLED)
6575                 return (STMF_SUCCESS);
6576         ASSERT(stmf_workers);
6577         stmf_workers_state = STMF_WORKERS_DISABLED;
6578         cv_signal(&stmf_state.stmf_cv);
6579 
6580         sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
6581         /* Wait for all the threads to die */
6582         while (stmf_nworkers_cur != 0) {
6583                 if (ddi_get_lbolt() > sb) {
6584                         stmf_workers_state = STMF_WORKERS_ENABLED;
6585                         return (STMF_BUSY);
6586                 }
6587                 delay(drv_usectohz(100 * 1000));
6588         }
6589         for (i = 0; i < stmf_nworkers; i++) {
6590                 stmf_worker_t *w = &stmf_workers[i];
6591                 mutex_destroy(&w->worker_lock);
6592                 cv_destroy(&w->worker_cv);
6593         }
6594         kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_nworkers);
6595         stmf_workers = NULL;
6596 
6597         return (STMF_SUCCESS);
6598 }
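/*
 * Main loop of a worker thread. Tasks are pulled off the worker queue and
 * the command on top of itask_cmd_stack is dispatched. Tasks being aborted
 * while still known to the LU or target port, and poll commands whose
 * timeout has not yet expired, are parked on the wait queue and moved back
 * onto the task queue when the wait timer fires. The thread exits once
 * STMF_WORKER_TERMINATE is set and its reference count drops to zero.
 */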
6599 
6600 void
6601 stmf_worker_task(void *arg)
6602 {
6603         stmf_worker_t *w;
6604         stmf_i_scsi_session_t *iss;
6605         scsi_task_t *task;
6606         stmf_i_scsi_task_t *itask;
6607         stmf_data_buf_t *dbuf;
6608         stmf_lu_t *lu;
6609         clock_t wait_timer = 0;
6610         clock_t wait_ticks, wait_delta = 0;
6611         uint8_t curcmd;
6612         uint8_t abort_free;
6613         uint8_t wait_queue;
6614         uint8_t dec_qdepth;
6615 
6616         w = (stmf_worker_t *)arg;
6617         wait_ticks = drv_usectohz(10000);
6618 
6619         DTRACE_PROBE1(worker__create, stmf_worker_t, w);
6620         mutex_enter(&w->worker_lock);
6621         w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
6622         atomic_inc_32(&stmf_nworkers_cur);
6623 
6624 stmf_worker_loop:
6625         if ((w->worker_ref_count == 0) &&
6626             (w->worker_flags & STMF_WORKER_TERMINATE)) {
6627                 w->worker_flags &= ~(STMF_WORKER_STARTED |
6628                     STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
6629                 w->worker_tid = NULL;
6630                 mutex_exit(&w->worker_lock);
6631                 DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
6632                 atomic_dec_32(&stmf_nworkers_cur);
6633                 thread_exit();
6634         }
6635 
6636         /* CONSTCOND */
6637         while (1) {
6638                 /* worker lock is held at this point */
6639                 dec_qdepth = 0;
6640                 if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
6641                         wait_timer = 0;
6642                         wait_delta = 0;
6643                         if (w->worker_wait_head) {
6644                                 ASSERT(w->worker_wait_tail);
6645                                 if (w->worker_task_head == NULL)
6646                                         w->worker_task_head =
6647                                             w->worker_wait_head;
6648                                 else
6649                                         w->worker_task_tail->itask_worker_next =
6650                                             w->worker_wait_head;
6651                                 w->worker_task_tail = w->worker_wait_tail;
6652                                 w->worker_wait_head = w->worker_wait_tail =
6653                                     NULL;
6654                         }
6655                 }
6656 
6657                 STMF_DEQUEUE_ITASK(w, itask);
6658                 if (itask == NULL)
6659                         break;
6660 
6661                 ASSERT((itask->itask_flags & ITASK_IN_FREE_LIST) == 0);
6662                 task = itask->itask_task;
6663                 DTRACE_PROBE2(worker__active, stmf_worker_t, w,
6664                     scsi_task_t *, task);
6665                 wait_queue = 0;
6666                 abort_free = 0;
6667                 mutex_exit(&w->worker_lock);
6668                 mutex_enter(&itask->itask_mutex);
6669                 mutex_enter(&w->worker_lock);
6670 
6671                 if (itask->itask_ncmds > 0) {
6672                         curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
6673                 } else {
6674                         ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
6675                 }
6676                 if (itask->itask_flags & ITASK_BEING_ABORTED) {
6677                         itask->itask_ncmds = 1;
6678                         curcmd = itask->itask_cmd_stack[0] =
6679                             ITASK_CMD_ABORT;
6680                         goto out_itask_flag_loop;
6681                 } else if ((curcmd & ITASK_CMD_MASK) == ITASK_CMD_NEW_TASK) {
6682                         /*
6683                          * Set ITASK_KSTAT_IN_RUNQ; this flag
6684                          * is not cleared until the task completes.
6685                          */
6686                         itask->itask_flags |= ITASK_KNOWN_TO_LU |
6687                             ITASK_KSTAT_IN_RUNQ;
6688                 } else {
6689                         goto out_itask_flag_loop;
6690                 }
6691 
6692 out_itask_flag_loop:
6693 
6694                 /*
6695                  * Decide if this task needs to go to a queue and/or if
6696                  * we can decrement the itask_cmd_stack.
6697                  */
6698                 if (curcmd == ITASK_CMD_ABORT) {
6699                         if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
6700                             ITASK_KNOWN_TO_TGT_PORT)) {
6701                                 wait_queue = 1;
6702                         } else {
6703                                 abort_free = 1;
6704                         }
6705                 } else if ((curcmd & ITASK_CMD_POLL) &&
6706                     (itask->itask_poll_timeout > ddi_get_lbolt())) {
6707                         wait_queue = 1;
6708                 }
6709 
6710                 if (wait_queue) {
6711                         itask->itask_worker_next = NULL;
6712                         if (w->worker_wait_tail) {
6713                                 w->worker_wait_tail->itask_worker_next = itask;
6714                         } else {
6715                                 w->worker_wait_head = itask;
6716                         }
6717                         w->worker_wait_tail = itask;
6718                         if (wait_timer == 0) {
6719                                 wait_timer = ddi_get_lbolt() + wait_ticks;
6720                                 wait_delta = wait_ticks;
6721                         }
6722                 } else if ((--(itask->itask_ncmds)) != 0) {
6723                         itask->itask_worker_next = NULL;
6724                         if (w->worker_task_tail) {
6725                                 w->worker_task_tail->itask_worker_next = itask;
6726                         } else {
6727                                 w->worker_task_head = itask;
6728                         }
6729                         w->worker_task_tail = itask;
6730                 } else {
6731                         atomic_and_32(&itask->itask_flags,
6732                             ~ITASK_IN_WORKER_QUEUE);
6733                         /*
6734                          * This is where the queue depth should go down by
6735                          * one but we delay that on purpose to account for
6736                          * the call into the provider. The actual decrement
6737                          * happens after the worker has done its job.
6738                          */
6739                         dec_qdepth = 1;
6740                         itask->itask_waitq_time +=
6741                             gethrtime() - itask->itask_waitq_enter_timestamp;
6742                 }
6743 
6744                 /* Making it here means we are going to call the LU */
6745                 if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
6746                         lu = task->task_lu;
6747                 else
6748                         lu = dlun0;
6749 
6750                 dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
6751                 mutex_exit(&w->worker_lock);
6752                 curcmd &= ITASK_CMD_MASK;
6753                 stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
6754                 mutex_exit(&itask->itask_mutex);
6755 
6756                 switch (curcmd) {
6757                 case ITASK_CMD_NEW_TASK:
6758                         iss = (stmf_i_scsi_session_t *)
6759                             task->task_session->ss_stmf_private;
6760                         stmf_itl_lu_new_task(itask);
6761                         if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
6762                                 if (stmf_handle_cmd_during_ic(itask)) {
6763                                         break;
6764                                 }
6765                         }
6766 #ifdef  DEBUG
6767                         if (stmf_drop_task_counter > 0) {
6768                                 if (atomic_dec_32_nv(
6769                                     (uint32_t *)&stmf_drop_task_counter) == 1) {
6770                                         break;
6771                                 }
6772                         }
6773 #endif
6774                         DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
6775                         lu->lu_new_task(task, dbuf);
6776                         break;
6777                 case ITASK_CMD_DATA_XFER_DONE:
6778                         lu->lu_dbuf_xfer_done(task, dbuf);
6779                         break;
6780                 case ITASK_CMD_STATUS_DONE:
6781                         lu->lu_send_status_done(task);
6782                         break;
6783                 case ITASK_CMD_ABORT:
6784                         if (abort_free) {
6785                                 mutex_enter(&itask->itask_mutex);
6786                                 stmf_task_free(task);
6787                         } else {
6788                                 stmf_do_task_abort(task);
6789                         }
6790                         break;
6791                 case ITASK_CMD_POLL_LU:
6792                         if (!wait_queue) {
6793                                 lu->lu_task_poll(task);
6794                         }
6795                         break;
6796                 case ITASK_CMD_POLL_LPORT:
6797                         if (!wait_queue)
6798                                 task->task_lport->lport_task_poll(task);
6799                         break;
6800                 case ITASK_CMD_SEND_STATUS:
6801                 /* case ITASK_CMD_XFER_DATA: */
6802                         break;
6803                 }
6804 
6805                 mutex_enter(&w->worker_lock);
6806                 if (dec_qdepth) {
6807                         w->worker_queue_depth--;
6808                 }
6809         }
6810         if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
6811                 if (w->worker_ref_count == 0)
6812                         goto stmf_worker_loop;
6813                 else {
6814                         wait_timer = ddi_get_lbolt() + 1;
6815                         wait_delta = 1;
6816                 }
6817         }
6818         w->worker_flags &= ~STMF_WORKER_ACTIVE;
6819         if (wait_timer) {
6820                 DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
6821                 (void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
6822                     wait_delta, TR_CLOCK_TICK);
6823         } else {
6824                 DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
6825                 cv_wait(&w->worker_cv, &w->worker_lock);
6826         }
6827         DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
6828         w->worker_flags |= STMF_WORKER_ACTIVE;
6829         goto stmf_worker_loop;
6830 }
6831 
6832 /*
6833  * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6834  * If all the data has been filled out, frees the xd and makes
6835  * db_lu_private NULL.
6836  */
6837 void
6838 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off)
6839 {
6840         stmf_xfer_data_t *xd;
6841         uint8_t *p;
6842         int i;
6843         uint32_t s;
6844 
6845         xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6846         dbuf->db_data_size = 0;
6847         if (set_rel_off)
6848                 dbuf->db_relative_offset = xd->size_done;
6849         for (i = 0; i < dbuf->db_sglist_length; i++) {
6850                 s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6851                 p = &xd->buf[xd->size_done];
6852                 bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6853                 xd->size_left -= s;
6854                 xd->size_done += s;
6855                 dbuf->db_data_size += s;
6856                 if (xd->size_left == 0) {
6857                         kmem_free(xd, xd->alloc_size);
6858                         dbuf->db_lu_private = NULL;
6859                         return;
6860                 }
6861         }
6862 }
6863 
6864 /* ARGSUSED */
6865 stmf_status_t
6866 stmf_dlun0_task_alloc(scsi_task_t *task)
6867 {
6868         return (STMF_SUCCESS);
6869 }
6870 
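/*
 * New-task entry point for dlun0, the LU the worker dispatches to when a
 * task is marked for default handling. Task management functions go to
 * stmf_scsilib_handle_task_mgmt(); only standard INQUIRY (a minimal
 * 36-byte response with "NONE" vendor/product/revision strings) and
 * REPORT LUNS are handled here, anything else gets CHECK CONDITION with
 * INVALID OPCODE.
 */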
6871 void
6872 stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
6873 {
6874         uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
6875         stmf_i_scsi_session_t *iss;
6876         uint32_t sz, minsz;
6877         uint8_t *p;
6878         stmf_xfer_data_t *xd;
6879         uint8_t inq_page_length = 31;
6880 
6881         if (task->task_mgmt_function) {
6882                 stmf_scsilib_handle_task_mgmt(task);
6883                 return;
6884         }
6885 
6886         switch (cdbp[0]) {
6887         case SCMD_INQUIRY:
6888                 /*
6889                  * Basic protocol checks.  In addition, only reply to
6890                  * standard inquiry.  Otherwise, the LU provider needs
6891                  * to respond.
6892                  */
6893 
6894                 if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
6895                         stmf_scsilib_send_status(task, STATUS_CHECK,
6896                             STMF_SAA_INVALID_FIELD_IN_CDB);
6897                         return;
6898                 }
6899 
6900                 task->task_cmd_xfer_length =
6901                     (((uint32_t)cdbp[3]) << 8) | cdbp[4];
6902 
6903                 if (task->task_additional_flags &
6904                     TASK_AF_NO_EXPECTED_XFER_LENGTH) {
6905                         task->task_expected_xfer_length =
6906                             task->task_cmd_xfer_length;
6907                 }
6908 
6909                 sz = min(task->task_expected_xfer_length,
6910                     min(36, task->task_cmd_xfer_length));
6911                 minsz = 36;
6912 
6913                 if (sz == 0) {
6914                         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
6915                         return;
6916                 }
6917 
6918                 if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
6919                         /*
6920                          * Ignore any preallocated dbuf if the size is less
6921                          * than 36. It will be freed during the task_free.
6922                          */
6923                         dbuf = NULL;
6924                 }
6925                 if (dbuf == NULL)
6926                         dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
6927                 if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
6928                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6929                             STMF_ALLOC_FAILURE, NULL);
6930                         return;
6931                 }
6932                 dbuf->db_lu_private = NULL;
6933 
6934                 p = dbuf->db_sglist[0].seg_addr;
6935 
6936                 /*
6937                  * Standard inquiry handling only.
6938                  */
6939 
6940                 bzero(p, inq_page_length + 5);
6941 
6942                 p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
6943                 p[2] = 5;
6944                 p[3] = 0x12;
6945                 p[4] = inq_page_length;
6946                 p[6] = 0x80;
6947 
6948                 (void) strncpy((char *)p+8, "NONE    ", 8);
6949                 (void) strncpy((char *)p+16, "NONE            ", 16);
6950                 (void) strncpy((char *)p+32, "NONE", 4);
6951 
6952                 dbuf->db_data_size = sz;
6953                 dbuf->db_relative_offset = 0;
6954                 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
6955                 (void) stmf_xfer_data(task, dbuf, 0);
6956 
6957                 return;
6958 
6959         case SCMD_REPORT_LUNS:
6960                 task->task_cmd_xfer_length =
6961                     ((((uint32_t)task->task_cdb[6]) << 24) |
6962                     (((uint32_t)task->task_cdb[7]) << 16) |
6963                     (((uint32_t)task->task_cdb[8]) << 8) |
6964                     ((uint32_t)task->task_cdb[9]));
6965 
6966                 if (task->task_additional_flags &
6967                     TASK_AF_NO_EXPECTED_XFER_LENGTH) {
6968                         task->task_expected_xfer_length =
6969                             task->task_cmd_xfer_length;
6970                 }
6971 
6972                 sz = min(task->task_expected_xfer_length,
6973                     task->task_cmd_xfer_length);
6974 
6975                 if (sz < 16) {
6976                         stmf_scsilib_send_status(task, STATUS_CHECK,
6977                             STMF_SAA_INVALID_FIELD_IN_CDB);
6978                         return;
6979                 }
6980 
6981                 iss = (stmf_i_scsi_session_t *)
6982                     task->task_session->ss_stmf_private;
6983                 rw_enter(iss->iss_lockp, RW_WRITER);
6984                 xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
6985                 rw_exit(iss->iss_lockp);
6986 
6987                 if (xd == NULL) {
6988                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
6989                             STMF_ALLOC_FAILURE, NULL);
6990                         return;
6991                 }
6992 
6993                 sz = min(sz, xd->size_left);
6994                 xd->size_left = sz;
6995                 minsz = min(512, sz);
6996 
6997                 if (dbuf == NULL)
6998                         dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
6999                 if (dbuf == NULL) {
7000                         kmem_free(xd, xd->alloc_size);
7001                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7002                             STMF_ALLOC_FAILURE, NULL);
7003                         return;
7004                 }
7005                 dbuf->db_lu_private = xd;
7006                 stmf_xd_to_dbuf(dbuf, 1);
7007 
7008                 atomic_and_32(&iss->iss_flags,
7009                     ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
7010                 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
7011                 (void) stmf_xfer_data(task, dbuf, 0);
7012                 return;
7013         }
7014 
7015         stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
7016 }
7017 
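/*
 * Data transfer completion for dlun0. A failed transfer aborts the task.
 * If more xfer data remains (db_lu_private is still set) the next chunk
 * is queued; otherwise the dbuf is freed and either pppt is notified (for
 * proxy tasks) or GOOD status is sent.
 */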
7018 void
7019 stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
7020 {
7021         stmf_i_scsi_task_t *itask =
7022             (stmf_i_scsi_task_t *)task->task_stmf_private;
7023 
7024         if (dbuf->db_xfer_status != STMF_SUCCESS) {
7025                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7026                     dbuf->db_xfer_status, NULL);
7027                 return;
7028         }
7029         task->task_nbytes_transferred += dbuf->db_data_size;
7030         if (dbuf->db_lu_private) {
7031                 /* There is more */
7032                 stmf_xd_to_dbuf(dbuf, 1);
7033                 (void) stmf_xfer_data(task, dbuf, 0);
7034                 return;
7035         }
7036 
7037         stmf_free_dbuf(task, dbuf);
7038         /*
7039          * If this is a proxy task, it will need to be completed from the
7040          * proxy port provider. This message lets pppt know that the xfer
7041          * is complete. When we receive the status from pppt, we will
7042          * then relay that status back to the lport.
7043          */
7044         if (itask->itask_flags & ITASK_PROXY_TASK) {
7045                 stmf_ic_msg_t *ic_xfer_done_msg = NULL;
7046                 stmf_status_t ic_ret = STMF_FAILURE;
7047                 uint64_t session_msg_id;
7048                 mutex_enter(&stmf_state.stmf_lock);
7049                 session_msg_id = stmf_proxy_msg_id++;
7050                 mutex_exit(&stmf_state.stmf_lock);
7051                 /* send xfer done status to pppt */
7052                 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
7053                     itask->itask_proxy_msg_id,
7054                     task->task_session->ss_session_id,
7055                     STMF_SUCCESS, session_msg_id);
7056                 if (ic_xfer_done_msg) {
7057                         ic_ret = ic_tx_msg(ic_xfer_done_msg);
7058                         if (ic_ret != STMF_IC_MSG_SUCCESS) {
7059                                 cmn_err(CE_WARN, "unable to xmit session msg");
7060                         }
7061                 }
7062                 /* The task will be completed by pppt */
7063                 return;
7064         }
7065         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7066 }
7067 
7068 /* ARGSUSED */
7069 void
7070 stmf_dlun0_status_done(scsi_task_t *task)
7071 {
7072 }
7073 
7074 /* ARGSUSED */
7075 void
7076 stmf_dlun0_task_free(scsi_task_t *task)
7077 {
7078 }
7079 
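/*
 * Abort entry point for dlun0. Task management functions that caused an
 * LU or target reset have the corresponding RESET_ACTIVE state cleared;
 * for regular tasks any stmf_xfer_data_t still attached to the task's
 * dbufs is freed.
 */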
7080 /* ARGSUSED */
7081 stmf_status_t
7082 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
7083 {
7084         scsi_task_t *task = (scsi_task_t *)arg;
7085         stmf_i_scsi_task_t *itask =
7086             (stmf_i_scsi_task_t *)task->task_stmf_private;
7087         stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7088         int i;
7089         uint8_t map;
7090 
7091         if ((task->task_mgmt_function) && (itask->itask_flags &
7092             (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
7093                 switch (task->task_mgmt_function) {
7094                 case TM_ABORT_TASK:
7095                 case TM_ABORT_TASK_SET:
7096                 case TM_CLEAR_TASK_SET:
7097                 case TM_LUN_RESET:
7098                         atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7099                         break;
7100                 case TM_TARGET_RESET:
7101                 case TM_TARGET_COLD_RESET:
7102                 case TM_TARGET_WARM_RESET:
7103                         stmf_abort_target_reset(task);
7104                         break;
7105                 }
7106                 return (STMF_ABORT_SUCCESS);
7107         }
7108 
7109         /*
7110          * OK, so it's not a task mgmt function. Make sure we free any
7111          * xd sitting inside any dbuf.
7112          */
7113         if ((map = itask->itask_allocated_buf_map) != 0) {
7114                 for (i = 0; i < 4; i++) {
7115                         if ((map & 1) &&
7116                             ((itask->itask_dbufs[i])->db_lu_private)) {
7117                                 stmf_xfer_data_t *xd;
7118                                 stmf_data_buf_t *dbuf;
7119 
7120                                 dbuf = itask->itask_dbufs[i];
7121                                 xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
7122                                 dbuf->db_lu_private = NULL;
7123                                 kmem_free(xd, xd->alloc_size);
7124                         }
7125                         map >>= 1;
7126                 }
7127         }
7128         return (STMF_ABORT_SUCCESS);
7129 }
7130 
7131 void
7132 stmf_dlun0_task_poll(struct scsi_task *task)
7133 {
7134         /* Right now we only do this for handling task management functions */
7135         ASSERT(task->task_mgmt_function);
7136 
7137         switch (task->task_mgmt_function) {
7138         case TM_ABORT_TASK:
7139         case TM_ABORT_TASK_SET:
7140         case TM_CLEAR_TASK_SET:
7141         case TM_LUN_RESET:
7142                 (void) stmf_lun_reset_poll(task->task_lu, task, 0);
7143                 return;
7144         case TM_TARGET_RESET:
7145         case TM_TARGET_COLD_RESET:
7146         case TM_TARGET_WARM_RESET:
7147                 stmf_target_reset_poll(task);
7148                 return;
7149         }
7150 }
7151 
7152 /* ARGSUSED */
7153 void
7154 stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
7155 {
7156         /* This function will never be called */
7157         cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
7158 }
7159 
7160 /* ARGSUSED */
7161 void
7162 stmf_dlun0_task_done(struct scsi_task *task)
7163 {
7164 }
7165 
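/*
 * Allocates dlun0 and wires up its entry points. dlun0 is the internal
 * LU used by the worker thread whenever ITASK_DEFAULT_HANDLING is set on
 * a task.
 */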
7166 void
7167 stmf_dlun_init()
7168 {
7169         stmf_i_lu_t *ilu;
7170 
7171         dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
7172         dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
7173         dlun0->lu_new_task = stmf_dlun0_new_task;
7174         dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
7175         dlun0->lu_send_status_done = stmf_dlun0_status_done;
7176         dlun0->lu_task_free = stmf_dlun0_task_free;
7177         dlun0->lu_abort = stmf_dlun0_abort;
7178         dlun0->lu_task_poll = stmf_dlun0_task_poll;
7179         dlun0->lu_task_done = stmf_dlun0_task_done;
7180         dlun0->lu_ctl = stmf_dlun0_ctl;
7181 
7182         ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7183         ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
7184 }
7185 
7186 stmf_status_t
7187 stmf_dlun_fini()
7188 {
7189         stmf_i_lu_t *ilu;
7190 
7191         ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7192 
7193         ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
7194         if (ilu->ilu_ntasks) {
7195                 stmf_i_scsi_task_t *itask, *nitask;
7196 
7197                 nitask = ilu->ilu_tasks;
7198                 do {
7199                         itask = nitask;
7200                         nitask = itask->itask_lu_next;
7201                         dlun0->lu_task_free(itask->itask_task);
7202                         stmf_free(itask->itask_task);
7203                 } while (nitask != NULL);
7205         }
7206         stmf_free(dlun0);
7207         return (STMF_SUCCESS);
7208 }
7209 
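/*
 * Called when a target reset is aborted: clears ILU_RESET_ACTIVE on every
 * LU mapped into the session and then clears ISS_RESET_ACTIVE on the
 * session itself.
 */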
7210 void
7211 stmf_abort_target_reset(scsi_task_t *task)
7212 {
7213         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
7214             task->task_session->ss_stmf_private;
7215         stmf_lun_map_t *lm;
7216         stmf_lun_map_ent_t *lm_ent;
7217         stmf_i_lu_t *ilu;
7218         int i;
7219 
7220         rw_enter(iss->iss_lockp, RW_READER);
7221         lm = iss->iss_sm;
7222         for (i = 0; i < lm->lm_nentries; i++) {
7223                 if (lm->lm_plus[i] == NULL)
7224                         continue;
7225                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
7226                 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
7227                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
7228                         atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7229                 }
7230         }
7231         atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
7232         rw_exit(iss->iss_lockp);
7233 }
7234 
7235 /*
7236  * The return value is only used by the function managing target reset.
7237  */
7238 stmf_status_t
7239 stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
7240 {
7241         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7242         int ntasks_pending;
7243 
7244         ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
7245         /*
7246          * This function is also used during Target reset. The idea is that
7247          * once all the commands are aborted, we call the LU's reset entry
7248          * point (the abort entry point with a reset flag). But if this Task
7249          * mgmt function is running on this LU, then not all of the tasks can
7250          * be aborted; one task (this task) will still be running, which is OK.
7251          */
7252         if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
7253             (ntasks_pending == 1))) {
7254                 stmf_status_t ret;
7255 
7256                 if ((task->task_mgmt_function == TM_LUN_RESET) ||
7257                     (task->task_mgmt_function == TM_TARGET_RESET) ||
7258                     (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
7259                     (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
7260                         ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
7261                 } else {
7262                         ret = STMF_SUCCESS;
7263                 }
7264                 if (ret == STMF_SUCCESS) {
7265                         atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7266                 }
7267                 if (target_reset) {
7268                         return (ret);
7269                 }
7270                 if (ret == STMF_SUCCESS) {
7271                         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7272                         return (ret);
7273                 }
7274                 if (ret != STMF_BUSY) {
7275                         stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
7276                         return (ret);
7277                 }
7278         }
7279 
7280         if (target_reset) {
7281                 /* Tell target reset polling code that we are not done */
7282                 return (STMF_BUSY);
7283         }
7284 
7285         if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
7286             != STMF_SUCCESS) {
7287                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7288                     STMF_ALLOC_FAILURE, NULL);
7289                 return (STMF_SUCCESS);
7290         }
7291 
7292         return (STMF_SUCCESS);
7293 }
7294 
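/*
 * Polls target reset progress by calling stmf_lun_reset_poll() for every
 * LU in the session that still has ILU_RESET_ACTIVE set. If any LU is
 * still busy another poll is scheduled; once all LUs are done,
 * ISS_RESET_ACTIVE is cleared and GOOD status is sent.
 */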
7295 void
7296 stmf_target_reset_poll(struct scsi_task *task)
7297 {
7298         stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
7299             task->task_session->ss_stmf_private;
7300         stmf_lun_map_t *lm;
7301         stmf_lun_map_ent_t *lm_ent;
7302         stmf_i_lu_t *ilu;
7303         stmf_status_t ret;
7304         int i;
7305         int not_done = 0;
7306 
7307         ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);
7308 
7309         rw_enter(iss->iss_lockp, RW_READER);
7310         lm = iss->iss_sm;
7311         for (i = 0; i < lm->lm_nentries; i++) {
7312                 if (lm->lm_plus[i] == NULL)
7313                         continue;
7314                 lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
7315                 ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
7316                 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
7317                         rw_exit(iss->iss_lockp);
7318                         ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
7319                         rw_enter(iss->iss_lockp, RW_READER);
7320                         if (ret == STMF_SUCCESS)
7321                                 continue;
7322                         not_done = 1;
7323                         if (ret != STMF_BUSY) {
7324                                 rw_exit(iss->iss_lockp);
7325                                 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7326                                     STMF_ABORTED, NULL);
7327                                 return;
7328                         }
7329                 }
7330         }
7331         rw_exit(iss->iss_lockp);
7332 
7333         if (not_done) {
7334                 if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
7335                     != STMF_SUCCESS) {
7336                         stmf_abort(STMF_QUEUE_TASK_ABORT, task,
7337                             STMF_ALLOC_FAILURE, NULL);
7338                         return;
7339                 }
7340                 return;
7341         }
7342 
7343         atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
7344 
7345         stmf_scsilib_send_status(task, STATUS_GOOD, 0);
7346 }
7347 
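/*
 * Event registration helpers: add or remove a framework event id on an
 * LU's or local port's event handle. STMF_EVENT_ALL removes every
 * registered event at once.
 */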
7348 stmf_status_t
7349 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7350 {
7351         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7352 
7353         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7354                 return (STMF_INVALID_ARG);
7355         }
7356 
7357         STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7358         return (STMF_SUCCESS);
7359 }
7360 
7361 stmf_status_t
7362 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7363 {
7364         stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7365 
7366         if (eventid == STMF_EVENT_ALL) {
7367                 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7368                 return (STMF_SUCCESS);
7369         }
7370 
7371         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7372                 return (STMF_INVALID_ARG);
7373         }
7374 
7375         STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7376         return (STMF_SUCCESS);
7377 }
7378 
7379 stmf_status_t
7380 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7381 {
7382         stmf_i_local_port_t *ilport =
7383             (stmf_i_local_port_t *)lport->lport_stmf_private;
7384 
7385         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7386                 return (STMF_INVALID_ARG);
7387         }
7388 
7389         STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7390         return (STMF_SUCCESS);
7391 }
7392 
7393 stmf_status_t
7394 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7395 {
7396         stmf_i_local_port_t *ilport =
7397             (stmf_i_local_port_t *)lport->lport_stmf_private;
7398 
7399         if (eventid == STMF_EVENT_ALL) {
7400                 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7401                 return (STMF_SUCCESS);
7402         }
7403 
7404         if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7405                 return (STMF_INVALID_ARG);
7406         }
7407 
7408         STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7409         return (STMF_SUCCESS);
7410 }
7411 
7412 void
7413 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7414 {
7415         if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7416             (ilu->ilu_lu->lu_event_handler != NULL)) {
7417                 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7418         }
7419 }
7420 
7421 void
7422 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7423     uint32_t flags)
7424 {
7425         if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7426             (ilport->ilport_lport->lport_event_handler != NULL)) {
7427                 ilport->ilport_lport->lport_event_handler(
7428                     ilport->ilport_lport, eventid, arg, flags);
7429         }
7430 }
7431 
7432 /*
7433  * With the possibility of having multiple itl sessions pointing to the
7434  * same itl_kstat_info, the ilu_kstat_lock mutex is used to synchronize
7435  * the kstat update of the ilu_kstat_io, itl_kstat_taskq and itl_kstat_lu_xfer
7436  * statistics.
7437  */
7438 void
7439 stmf_itl_task_start(stmf_i_scsi_task_t *itask)
7440 {
7441         stmf_itl_data_t *itl = itask->itask_itl_datap;
7442         scsi_task_t     *task = itask->itask_task;
7443         stmf_i_lu_t     *ilu;
7444         stmf_i_scsi_session_t   *iss =
7445             itask->itask_task->task_session->ss_stmf_private;
7446         stmf_i_remote_port_t    *irport = iss->iss_irport;
7447 
7448         if (itl == NULL || task->task_lu == dlun0)
7449                 return;
7450         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7451         itask->itask_start_timestamp = gethrtime();
7452         itask->itask_xfer_done_timestamp = 0;
7453         if (ilu->ilu_kstat_io != NULL) {
7454                 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7455                 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
7456                 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7457         }
7458 
7459         if (irport->irport_kstat_estat != NULL) {
7460                 if (task->task_flags & TF_READ_DATA)
7461                         atomic_inc_32(&irport->irport_nread_tasks);
7462                 else if (task->task_flags & TF_WRITE_DATA)
7463                         atomic_inc_32(&irport->irport_nwrite_tasks);
7464         }
7465 
7466         stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
7467 }
7468 
7469 void
7470 stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
7471 {
7472         stmf_itl_data_t *itl = itask->itask_itl_datap;
7473         scsi_task_t     *task = itask->itask_task;
7474         stmf_i_lu_t     *ilu;
7475 
7476         if (itl == NULL || task->task_lu == dlun0)
7477                 return;
7478         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7479         if (ilu->ilu_kstat_io != NULL) {
7480                 mutex_enter(ilu->ilu_kstat_io->ks_lock);
7481                 stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
7482                 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7483         }
7484 
7485         stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
7486 }
7487 
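/*
 * Final kstat accounting for a task: records the completion timestamp,
 * updates the remote port estat counters, and exits either the run queue
 * or the wait queue depending on whether ITASK_KSTAT_IN_RUNQ was set.
 */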
7488 void
7489 stmf_itl_task_done(stmf_i_scsi_task_t *itask)
7490 {
7491         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7492         scsi_task_t             *task = itask->itask_task;
7493         stmf_i_lu_t     *ilu;
7494 
7495         itask->itask_done_timestamp = gethrtime();
7496 
7497         if (itl == NULL || task->task_lu == dlun0)
7498                 return;
7499         ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7500 
7501         if (ilu->ilu_kstat_io == NULL)
7502                 return;
7503 
7504         stmf_update_kstat_rport_estat(task);
7505 
7506         mutex_enter(ilu->ilu_kstat_io->ks_lock);
7507 
7508         if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
7509                 stmf_update_kstat_lu_q(task, kstat_runq_exit);
7510                 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7511                 stmf_update_kstat_lport_q(task, kstat_runq_exit);
7512         } else {
7513                 stmf_update_kstat_lu_q(task, kstat_waitq_exit);
7514                 mutex_exit(ilu->ilu_kstat_io->ks_lock);
7515                 stmf_update_kstat_lport_q(task, kstat_waitq_exit);
7516         }
7517 }
7518 
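/*
 * Accumulates the LU-side read or write transfer time for a task; dlun0
 * tasks are not tracked.
 */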
7519 void
7520 stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, hrtime_t elapsed_time)
7521 {
7522         stmf_i_scsi_task_t *itask = task->task_stmf_private;
7523 
7524         if (task->task_lu == dlun0)
7525                 return;
7526 
7527         if (read) {
7528                 atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
7529                     elapsed_time);
7530         } else {
7531                 atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
7532                     elapsed_time);
7533         }
7534 }
7535 
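/*
 * Local port transfer accounting: stmf_lport_xfer_start() timestamps the
 * start of a dbuf transfer and stmf_lport_xfer_done() adds the elapsed
 * time and byte count to the task's per-direction counters.
 */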
7536 static void
7537 stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
7538 {
7539         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7540 
7541         if (itl == NULL)
7542                 return;
7543 
7544         DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
7545             stmf_data_buf_t *, dbuf);
7546 
7547         dbuf->db_xfer_start_timestamp = gethrtime();
7548 }
7549 
7550 static void
7551 stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
7552 {
7553         stmf_itl_data_t         *itl = itask->itask_itl_datap;
7554         hrtime_t                elapsed_time;
7555         uint64_t                xfer_size;
7556 
7557         if (itl == NULL)
7558                 return;
7559 
7560         xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
7561             dbuf->db_data_size : 0;
7562 
7563         itask->itask_xfer_done_timestamp = gethrtime();
7564         elapsed_time = itask->itask_xfer_done_timestamp -
7565             dbuf->db_xfer_start_timestamp;
7566         if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
7567                 atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
7568                     elapsed_time);
7569                 atomic_add_64((uint64_t *)&itask->itask_read_xfer,
7570                     xfer_size);
7571         } else {
7572                 atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
7573                     elapsed_time);
7574                 atomic_add_64((uint64_t *)&itask->itask_write_xfer,
7575                     xfer_size);
7576         }
7577 
7578         DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
7579             stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);
7580 
7581         dbuf->db_xfer_start_timestamp = 0;
7582 }
7583 
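/*
 * Starts the STMF service thread (if it isn't already running). Requests
 * queued by stmf_svc_queue() are drained by stmf_svc(), which runs on a
 * single-threaded taskq.
 */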
7584 void
7585 stmf_svc_init()
7586 {
7587         if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7588                 return;
7589         list_create(&stmf_state.stmf_svc_list, sizeof (stmf_svc_req_t),
7590             offsetof(stmf_svc_req_t, svc_list_entry));
7591         stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
7592             TASKQ_DEFAULTPRI, 0);
7593         (void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
7594             stmf_svc, 0, DDI_SLEEP);
7595 }
7596 
7597 stmf_status_t
7598 stmf_svc_fini()
7599 {
7600         uint32_t i;
7601 
7602         mutex_enter(&stmf_state.stmf_lock);
7603         if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
7604                 stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
7605                 cv_signal(&stmf_state.stmf_cv);
7606         }
7607         mutex_exit(&stmf_state.stmf_lock);
7608 
7609         /* Wait up to 5 seconds for the service thread to exit */
7610         for (i = 0; i < 500; i++) {
7611                 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7612                         delay(drv_usectohz(10000));
7613                 else
7614                         break;
7615         }
7616         if (i == 500)
7617                 return (STMF_BUSY);
7618 
7619         list_destroy(&stmf_state.stmf_svc_list);
7620         ddi_taskq_destroy(stmf_state.stmf_svc_taskq);
7621 
7622         return (STMF_SUCCESS);
7623 }
7624 
7625 struct stmf_svc_clocks {
7626         clock_t drain_start, drain_next;
7627         clock_t timing_start, timing_next;
7628         clock_t worker_delay;
7629 };
7630 
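/*
 * Service thread main loop. Online/offline requests queued by
 * stmf_svc_queue() are processed with stmf_state.stmf_lock dropped around
 * the provider ctl calls; when the request list is empty, periodic
 * housekeeping is done in stmf_svc_timeout().
 */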
7631 /* ARGSUSED */
7632 void
7633 stmf_svc(void *arg)
7634 {
7635         stmf_svc_req_t *req;
7636         stmf_lu_t *lu;
7637         stmf_i_lu_t *ilu;
7638         stmf_local_port_t *lport;
7639         struct stmf_svc_clocks clks = { 0 };
7640 
7641         mutex_enter(&stmf_state.stmf_lock);
7642         stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;
7643 
7644         while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) {
7645                 if (list_is_empty(&stmf_state.stmf_svc_list)) {
7646                         stmf_svc_timeout(&clks);
7647                         continue;
7648                 }
7649 
7650                 /*
7651                  * Pop the front request from the active list.  After this,
7652                  * the request will no longer be referenced by global state,
7653                  * so it should be safe to access it without holding the
7654                  * stmf state lock.
7655                  */
7656                 req = list_remove_head(&stmf_state.stmf_svc_list);
7657                 if (req == NULL)
7658                         continue;
7659 
7660                 switch (req->svc_cmd) {
7661                 case STMF_CMD_LPORT_ONLINE:
7662                         /* Fallthrough */
7663                 case STMF_CMD_LPORT_OFFLINE:
7664                         mutex_exit(&stmf_state.stmf_lock);
7665                         lport = (stmf_local_port_t *)req->svc_obj;
7666                         lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
7667                         break;
7668                 case STMF_CMD_LU_ONLINE:
7669                         mutex_exit(&stmf_state.stmf_lock);
7670                         lu = (stmf_lu_t *)req->svc_obj;
7671                         lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
7672                         break;
7673                 case STMF_CMD_LU_OFFLINE:
7674                         /* Remove all mappings of this LU */
7675                         stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
7676                         /* Kill all the pending I/Os for this LU */
7677                         mutex_exit(&stmf_state.stmf_lock);
7678                         stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
7679                             STMF_ABORTED);
7680                         lu = (stmf_lu_t *)req->svc_obj;
7681                         ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7682                         stmf_wait_ilu_tasks_finish(ilu);
7683                         lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
7684                         break;
7685                 default:
7686                         cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
7687                             req->svc_cmd);
7688                 }
7689 
7690                 kmem_free(req, req->svc_req_alloc_size);
7691                 mutex_enter(&stmf_state.stmf_lock);
7692         }
7693 
7694         stmf_state.stmf_svc_flags &= ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
7695         mutex_exit(&stmf_state.stmf_lock);
7696 }
7697 
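/*
 * Periodic housekeeping for the service thread, run while the request
 * list is empty: ILU timing checks, draining of cached free tasks, and
 * generation of LPORT_EVENT_INITIAL_LUN_MAPPED events (at most four
 * sessions per pass). Finally waits up to ~20ms for new requests.
 */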
7698 static void
7699 stmf_svc_timeout(struct stmf_svc_clocks *clks)
7700 {
7701         clock_t td;
7702         stmf_i_local_port_t *ilport, *next_ilport;
7703         stmf_i_scsi_session_t *iss;
7704 
7705         ASSERT(mutex_owned(&stmf_state.stmf_lock));
7706 
7707         td = drv_usectohz(20000);
7708 
7709         /* Do timeouts */
7710         if (stmf_state.stmf_nlus &&
7711             ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) {
7712                 if (!stmf_state.stmf_svc_ilu_timing) {
7713                         /* we are starting a new round */
7714                         stmf_state.stmf_svc_ilu_timing =
7715                             stmf_state.stmf_ilulist;
7716                         clks->timing_start = ddi_get_lbolt();
7717                 }
7718 
7719                 stmf_check_ilu_timing();
7720                 if (!stmf_state.stmf_svc_ilu_timing) {
7721                         /* we finished a complete round */
7722                         clks->timing_next =
7723                             clks->timing_start + drv_usectohz(5*1000*1000);
7724                 } else {
7725                         /* we still have some ilu items to check */
7726                         clks->timing_next =
7727                             ddi_get_lbolt() + drv_usectohz(1*1000*1000);
7728                 }
7729 
7730                 if (!list_is_empty(&stmf_state.stmf_svc_list))
7731                         return;
7732         }
7733 
7734         /* Check if there are free tasks to clear */
7735         if (stmf_state.stmf_nlus &&
7736             ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) {
7737                 if (!stmf_state.stmf_svc_ilu_draining) {
7738                         /* we are starting a new round */
7739                         stmf_state.stmf_svc_ilu_draining =
7740                             stmf_state.stmf_ilulist;
7741                         clks->drain_start = ddi_get_lbolt();
7742                 }
7743 
7744                 stmf_check_freetask();
7745                 if (!stmf_state.stmf_svc_ilu_draining) {
7746                         /* we finished a complete round */
7747                         clks->drain_next =
7748                             clks->drain_start + drv_usectohz(10*1000*1000);
7749                 } else {
7750                         /* we still have some ilu items to check */
7751                         clks->drain_next =
7752                             ddi_get_lbolt() + drv_usectohz(1*1000*1000);
7753                 }
7754 
7755                 if (!list_is_empty(&stmf_state.stmf_svc_list))
7756                         return;
7757         }
7758 
7759         /* Check if any active session got its 1st LUN */
7760         if (stmf_state.stmf_process_initial_luns) {
7761                 int stmf_level = 0;
7762                 int port_level;
7763 
7764                 for (ilport = stmf_state.stmf_ilportlist; ilport;
7765                     ilport = next_ilport) {
7766                         int ilport_lock_held;
7767                         next_ilport = ilport->ilport_next;
7768 
7769                         if ((ilport->ilport_flags &
7770                             ILPORT_SS_GOT_INITIAL_LUNS) == 0)
7771                                 continue;
7772 
7773                         port_level = 0;
7774                         rw_enter(&ilport->ilport_lock, RW_READER);
7775                         ilport_lock_held = 1;
7776 
7777                         for (iss = ilport->ilport_ss_list; iss;
7778                             iss = iss->iss_next) {
7779                                 if ((iss->iss_flags &
7780                                     ISS_GOT_INITIAL_LUNS) == 0)
7781                                         continue;
7782 
7783                                 port_level++;
7784                                 stmf_level++;
7785                                 atomic_and_32(&iss->iss_flags,
7786                                     ~ISS_GOT_INITIAL_LUNS);
7787                                 atomic_or_32(&iss->iss_flags,
7788                                     ISS_EVENT_ACTIVE);
7789                                 rw_exit(&ilport->ilport_lock);
7790                                 ilport_lock_held = 0;
7791                                 mutex_exit(&stmf_state.stmf_lock);
7792                                 stmf_generate_lport_event(ilport,
7793                                     LPORT_EVENT_INITIAL_LUN_MAPPED,
7794                                     iss->iss_ss, 0);
7795                                 atomic_and_32(&iss->iss_flags,
7796                                     ~ISS_EVENT_ACTIVE);
7797                                 mutex_enter(&stmf_state.stmf_lock);
7798                                 /*
7799                                  * scan all the ilports again as the
7800                                  * ilport list might have changed.
7801                                  */
7802                                 next_ilport = stmf_state.stmf_ilportlist;
7803                                 break;
7804                         }
7805 
7806                         if (port_level == 0)
7807                                 atomic_and_32(&ilport->ilport_flags,
7808                                     ~ILPORT_SS_GOT_INITIAL_LUNS);
7809                         /* drop the lock if we are holding it. */
7810                         if (ilport_lock_held == 1)
7811                                 rw_exit(&ilport->ilport_lock);
7812 
7813                         /* Max 4 sessions at a time */
7814                         if (stmf_level >= 4)
7815                                 break;
7816                 }
7817 
7818                 if (stmf_level == 0)
7819                         stmf_state.stmf_process_initial_luns = 0;
7820         }
7821 
7822         stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
7823         (void) cv_reltimedwait(&stmf_state.stmf_cv,
7824             &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
7825         stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
7826 }
7827 
7828 /*
7829  * Waits for ongoing I/O tasks to finish on an LU in preparation for
7830  * the LU's offlining. The LU should already be in an Offlining state
7831  * (otherwise I/O to the LU might never end). There is an additional
7832  * enforcement of this via a deadman timer check.
7833  */
7834 static void
7835 stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu)
7836 {
7837         clock_t start, now, deadline;
7838 
7839         start = now = ddi_get_lbolt();
7840         deadline = start + drv_usectohz(stmf_io_deadman * 1000000llu);
7841         mutex_enter(&ilu->ilu_task_lock);
7842         while (ilu->ilu_ntasks != ilu->ilu_ntasks_free) {
7843                 (void) cv_timedwait(&ilu->ilu_offline_pending_cv,
7844                     &ilu->ilu_task_lock, deadline);
7845                 now = ddi_get_lbolt();
7846                 if (now > deadline) {
7847                         if (stmf_io_deadman_enabled) {
7848                                 cmn_err(CE_PANIC, "stmf_svc: I/O deadman hit "
7849                                     "on STMF_CMD_LU_OFFLINE after %d seconds",
7850                                     stmf_io_deadman);
7851                         } else {
7852                                 /* keep on spinning */
7853                                 deadline = now + drv_usectohz(stmf_io_deadman *
7854                                     1000000llu);
7855                         }
7856                 }
7857         }
7858         mutex_exit(&ilu->ilu_task_lock);
7859         DTRACE_PROBE1(deadman__timeout__wait, clock_t, now - start);
7860 }
7861 
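/*
 * Queues a state change request for the service thread. The optional
 * st_additional_info string is copied into the tail of the same
 * allocation so the whole request can be freed with one kmem_free().
 */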
7862 void
7863 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
7864 {
7865         stmf_svc_req_t *req;
7866         int s;
7867 
7868         ASSERT(!mutex_owned(&stmf_state.stmf_lock));
7869         s = sizeof (stmf_svc_req_t);
7870         if (info->st_additional_info) {
7871                 s += strlen(info->st_additional_info) + 1;
7872         }
7873         req = kmem_zalloc(s, KM_SLEEP);
7874 
7875         req->svc_cmd = cmd;
7876         req->svc_obj = obj;
7877         req->svc_info.st_rflags = info->st_rflags;
7878         if (info->st_additional_info) {
7879                 req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
7880                     sizeof (stmf_svc_req_t)));
7881                 (void) strcpy(req->svc_info.st_additional_info,
7882                     info->st_additional_info);
7883         }
7884         req->svc_req_alloc_size = s;
7885 
7886         mutex_enter(&stmf_state.stmf_lock);
7887         list_insert_tail(&stmf_state.stmf_svc_list, req);
7888         if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
7889                 cv_signal(&stmf_state.stmf_cv);
7890         }
7891         mutex_exit(&stmf_state.stmf_lock);
7892 }
7893 
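/*
 * Discards any queued service requests that reference the given object.
 * Must be called with stmf_state.stmf_lock held.
 */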
static void
stmf_svc_kill_obj_requests(void *obj)
{
        stmf_svc_req_t *req;
        stmf_svc_req_t *next_req;

        ASSERT(mutex_owned(&stmf_state.stmf_lock));

        for (req = list_head(&stmf_state.stmf_svc_list); req != NULL;
            req = next_req) {
                /* Look up the next entry before req can be freed below. */
                next_req = list_next(&stmf_state.stmf_svc_list, req);
                if (req->svc_obj == obj) {
                        list_remove(&stmf_state.stmf_svc_list, req);
                        kmem_free(req, req->svc_req_alloc_size);
                }
        }
}
7909 
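/*
 * Appends a formatted message to the global circular trace buffer.
 * Messages are truncated to roughly 158 characters and the write index
 * wraps back to the start when it gets within 320 bytes of the end.
 */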
7910 void
7911 stmf_trace(caddr_t ident, const char *fmt, ...)
7912 {
7913         va_list args;
7914         char tbuf[160];
7915         int len;
7916 
7917         if (!stmf_trace_on)
7918                 return;
7919         len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
7920             ddi_get_lbolt());
7921         va_start(args, fmt);
7922         len += vsnprintf(tbuf + len, 158 - len, fmt, args);
7923         va_end(args);
7924 
7925         if (len > 158) {
7926                 len = 158;
7927         }
7928         tbuf[len++] = '\n';
7929         tbuf[len] = 0;
7930 
7931         mutex_enter(&trace_buf_lock);
7932         bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
7933         trace_buf_curndx += len;
7934         if (trace_buf_curndx > (trace_buf_size - 320))
7935                 trace_buf_curndx = 0;
7936         mutex_exit(&trace_buf_lock);
7937 }
7938 
7939 void
7940 stmf_trace_clear()
7941 {
7942         if (!stmf_trace_on)
7943                 return;
7944         mutex_enter(&trace_buf_lock);
7945         trace_buf_curndx = 0;
7946         if (trace_buf_size > 0)
7947                 stmf_trace_buf[0] = 0;
7948         mutex_exit(&trace_buf_lock);
7949 }
7950 
7951 /*
7952  * NOTE: Due to lock order problems that are not possible to fix, this
7953  * method drops and reacquires the itask_mutex around the call to stmf_ctl.
7954  * Another possible workaround would be to use a dispatch queue and have
7955  * the call to stmf_ctl run on another thread that's not holding the
7956  * itask_mutex. The problem with that approach is that it's difficult to
7957  * determine what impact an asynchronous change would have on the system state.
7958  */
7959 static void
7960 stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
7961 {
7962         stmf_state_change_info_t        change_info;
7963         void                            *ctl_private;
7964         uint32_t                        ctl_cmd;
7965         int                             msg = 0;
7966         stmf_i_scsi_task_t              *itask =
7967             (stmf_i_scsi_task_t *)task->task_stmf_private;
7968 
7969         stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
7970             offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
7971         change_info.st_additional_info = info;
7972         ASSERT(mutex_owned(&itask->itask_mutex));
7973 
7974         if (offline_lu) {
7975                 change_info.st_rflags = STMF_RFLAG_RESET |
7976                     STMF_RFLAG_LU_ABORT;
7977                 ctl_private = task->task_lu;
7978                 if (((stmf_i_lu_t *)
7979                     task->task_lu->lu_stmf_private)->ilu_state ==
7980                     STMF_STATE_ONLINE) {
7981                         msg = 1;
7982                 }
7983                 ctl_cmd = STMF_CMD_LU_OFFLINE;
7984         } else {
7985                 change_info.st_rflags = STMF_RFLAG_RESET |
7986                     STMF_RFLAG_LPORT_ABORT;
7987                 ctl_private = task->task_lport;
7988                 if (((stmf_i_local_port_t *)
7989                     task->task_lport->lport_stmf_private)->ilport_state ==
7990                     STMF_STATE_ONLINE) {
7991                         msg = 1;
7992                 }
7993                 ctl_cmd = STMF_CMD_LPORT_OFFLINE;
7994         }
7995 
7996         if (msg) {
7997                 stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
7998                     offline_lu ? "LU" : "LPORT", info ? info :
7999                     "<no additional info>");
8000         }
8001         mutex_exit(&itask->itask_mutex);
8002         (void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
8003         mutex_enter(&itask->itask_mutex);
8004 }
8005 
8006 static char
8007 stmf_ctoi(char c)
8008 {
8009         if ((c >= '0') && (c <= '9'))
8010                 c -= '0';
8011         else if ((c >= 'A') && (c <= 'F'))
8012                 c = c - 'A' + 10;
8013         else if ((c >= 'a') && (c <= 'f'))
8014                 c = c - 'a' + 10;
8015         else
8016                 c = -1;
8017         return (c);
8018 }
8019 
8020 /* Convert from Hex value in ASCII format to the equivalent bytes */
8021 static boolean_t
8022 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp)
8023 {
8024         int             ii;
8025 
8026         for (ii = 0; ii < dplen; ii++) {
8027                 char nibble1, nibble2;
8028                 char enc_char = *c++;
8029                 nibble1 = stmf_ctoi(enc_char);
8030 
8031                 enc_char = *c++;
8032                 nibble2 = stmf_ctoi(enc_char);
8033                 if (nibble1 == -1 || nibble2 == -1)
8034                         return (B_FALSE);
8035 
8036                 dp[ii] = (nibble1 << 4) | nibble2;
8037         }
8038         return (B_TRUE);
8039 }
8040 
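/*
 * Validates a SCSI TransportID against the SPC-3 formats for the
 * protocols listed below and, if tptid_sz is non-NULL, returns the actual
 * size of the identifier.
 */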
8041 boolean_t
8042 stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz,
8043     uint16_t *tptid_sz)
8044 {
8045         uint16_t tpd_len = SCSI_TPTID_SIZE;
8046 
8047         if (tptid_sz)
8048                 *tptid_sz = 0;
8049         if (total_sz < sizeof (scsi_transport_id_t))
8050                 return (B_FALSE);
8051 
8052         switch (tptid->protocol_id) {
8053 
8054         case PROTOCOL_FIBRE_CHANNEL:
8055                 /* FC Transport ID validation checks. SPC3 rev23, Table 284 */
8056                 if (total_sz < tpd_len || tptid->format_code != 0)
8057                         return (B_FALSE);
8058                 break;
8059 
8060         case PROTOCOL_iSCSI: /* CSTYLED */
8061                 {
8062                 iscsi_transport_id_t    *iscsiid;
8063                 uint16_t                adn_len, name_len;
8064 
8065                 /* Check for valid format code, SPC3 rev 23 Table 288 */
8066                 if ((total_sz < tpd_len) ||
8067                     (tptid->format_code != 0 && tptid->format_code != 1))
8068                         return (B_FALSE);
8069 
8070                 iscsiid = (iscsi_transport_id_t *)tptid;
8071                 adn_len = READ_SCSI16(iscsiid->add_len, uint16_t);
8072                 tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1;
8073 
8074                 /*
8075                  * iSCSI Transport ID validation checks.
8076                  * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290
8077                  */
8078                 if (adn_len < 20 || (adn_len % 4 != 0))
8079                         return (B_FALSE);
8080 
8081                 name_len = strnlen(iscsiid->iscsi_name, adn_len);
8082                 if (name_len == 0 || name_len >= adn_len)
8083                         return (B_FALSE);
8084 
8085                 /* If the format_code is 1, check for the ISID separator */
8086                 if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name,
8087                     SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL))
8088                         return (B_FALSE);
8089 
8090                 }
8091                 break;
8092 
8093         case PROTOCOL_SRP:
8094                 /* SRP Transport ID validation checks. SPC3 rev23, Table 287 */
8095                 if (total_sz < tpd_len || tptid->format_code != 0)
8096                         return (B_FALSE);
8097                 break;
8098 
8099         case PROTOCOL_PARALLEL_SCSI:
8100         case PROTOCOL_SSA:
8101         case PROTOCOL_IEEE_1394:
8102         case PROTOCOL_SAS:
8103         case PROTOCOL_ADT:
8104         case PROTOCOL_ATAPI:
8105         default: /* CSTYLED */
8106                 {
8107                 stmf_dflt_scsi_tptid_t *dflttpd;
8108 
8109                 tpd_len = sizeof (stmf_dflt_scsi_tptid_t);
8110                 if (total_sz < tpd_len)
8111                         return (B_FALSE);
8112                 dflttpd = (stmf_dflt_scsi_tptid_t *)tptid;
8113                 tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1;
8114                 if (total_sz < tpd_len)
8115                         return (B_FALSE);
8116                 }
8117                 break;
8118         }
8119         if (tptid_sz)
8120                 *tptid_sz = tpd_len;
8121         return (B_TRUE);
8122 }
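
/*
 * Usage sketch (illustrative only, names are hypothetical): a caller that
 * received a TransportID buffer "buf" of "buf_len" bytes, e.g. from a
 * PERSISTENT RESERVE OUT parameter list, would validate it before use.  On
 * success tptid_sz holds the actual length of this TransportID, which may
 * be smaller than buf_len when further descriptors follow in the buffer:
 *
 *	uint16_t tptid_sz;
 *
 *	if (!stmf_scsilib_tptid_validate((scsi_transport_id_t *)buf,
 *	    buf_len, &tptid_sz))
 *		return (EINVAL);
 */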
8123 
8124 boolean_t
8125 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1,
8126     scsi_transport_id_t *tpd2)
8127 {
8128         if ((tpd1->protocol_id != tpd2->protocol_id) ||
8129             (tpd1->format_code != tpd2->format_code))
8130                 return (B_FALSE);
8131 
8132         switch (tpd1->protocol_id) {
8133 
8134         case PROTOCOL_iSCSI: /* CSTYLED */
8135                 {
8136                 iscsi_transport_id_t *iscsitpd1, *iscsitpd2;
8137                 uint16_t len;
8138 
8139                 iscsitpd1 = (iscsi_transport_id_t *)tpd1;
8140                 iscsitpd2 = (iscsi_transport_id_t *)tpd2;
8141                 len = SCSI_READ16(&iscsitpd1->add_len);
8142                 if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) ||
8143                     (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len)
8144                     != 0))
8145                         return (B_FALSE);
8146                 }
8147                 break;
8148 
8149         case PROTOCOL_SRP: /* CSTYLED */
8150                 {
8151                 scsi_srp_transport_id_t *srptpd1, *srptpd2;
8152 
8153                 srptpd1 = (scsi_srp_transport_id_t *)tpd1;
8154                 srptpd2 = (scsi_srp_transport_id_t *)tpd2;
8155                 if (memcmp(srptpd1->srp_name, srptpd2->srp_name,
8156                     sizeof (srptpd1->srp_name)) != 0)
8157                         return (B_FALSE);
8158                 }
8159                 break;
8160 
8161         case PROTOCOL_FIBRE_CHANNEL: /* CSTYLED */
8162                 {
8163                 scsi_fc_transport_id_t *fctpd1, *fctpd2;
8164 
8165                 fctpd1 = (scsi_fc_transport_id_t *)tpd1;
8166                 fctpd2 = (scsi_fc_transport_id_t *)tpd2;
8167                 if (memcmp(fctpd1->port_name, fctpd2->port_name,
8168                     sizeof (fctpd1->port_name)) != 0)
8169                         return (B_FALSE);
8170                 }
8171                 break;
8172 
8173         case PROTOCOL_PARALLEL_SCSI:
8174         case PROTOCOL_SSA:
8175         case PROTOCOL_IEEE_1394:
8176         case PROTOCOL_SAS:
8177         case PROTOCOL_ADT:
8178         case PROTOCOL_ATAPI:
8179         default: /* CSTYLED */
8180                 {
8181                 stmf_dflt_scsi_tptid_t *dflt1, *dflt2;
8182                 uint16_t len;
8183 
8184                 dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1;
8185                 dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2;
8186                 len = SCSI_READ16(&dflt1->ident_len);
8187                 if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) ||
8188                     (memcmp(dflt1->ident, dflt2->ident, len) != 0))
8189                         return (B_FALSE);
8190                 }
8191                 break;
8192         }
8193         return (B_TRUE);
8194 }
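
/*
 * Comparison sketch (illustrative only, names are hypothetical): two
 * TransportIDs match only when protocol_id and format_code agree and the
 * protocol-specific identifier bytes are identical, so a caller checking
 * whether a new registration refers to an already known remote port could
 * simply do:
 *
 *	if (stmf_scsilib_tptid_compare(known_rport->rport_tptid, new_tptid))
 *		reuse_existing_registration();
 */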
8195 
8196 /*
8197  * Converts a devid_desc to the corresponding TransportID format.
8198  * Returns :- pointer to stmf_remote_port_t
8199  * Note    :- Allocates contiguous memory for the stmf_remote_port_t and the
8200  *            TransportID. This memory must be freed with
8201  *            stmf_remote_port_free() when the remote_port is no longer used.
8202  */
8203 stmf_remote_port_t *
8204 stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid)
8205 {
8206         struct scsi_fc_transport_id     *fc_tpd;
8207         struct iscsi_transport_id       *iscsi_tpd;
8208         struct scsi_srp_transport_id    *srp_tpd;
8209         struct stmf_dflt_scsi_tptid     *dflt_tpd;
8210         uint16_t ident_len, sz = 0;
8211         stmf_remote_port_t *rpt = NULL;
8212 
8213         ident_len = devid->ident_length;
8214         ASSERT(ident_len);
8215         switch (devid->protocol_id) {
8216         case PROTOCOL_FIBRE_CHANNEL:
8217                 sz = sizeof (scsi_fc_transport_id_t);
8218                 rpt = stmf_remote_port_alloc(sz);
8219                 rpt->rport_tptid->format_code = 0;
8220                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8221                 fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid;
8222                 /*
8223                  * Convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary,
8224                  * skipping the first 4 bytes for the "wwn." prefix.
8225                  */
8226                 ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0);
8227                 if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) ||
8228                     !stmf_base16_str_to_binary((char *)devid->ident + 4,
8229                     SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name))
8230                         goto devid_to_remote_port_fail;
8231                 break;
8232 
8233         case PROTOCOL_iSCSI:
8234                 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) +
8235                     ident_len - 1);
8236                 rpt = stmf_remote_port_alloc(sz);
8237                 rpt->rport_tptid->format_code = 0;
8238                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8239                 iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid;
8240                 SCSI_WRITE16(iscsi_tpd->add_len, ident_len);
8241                 (void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len);
8242                 break;
8243 
8244         case PROTOCOL_SRP:
8245                 sz = sizeof (scsi_srp_transport_id_t);
8246                 rpt = stmf_remote_port_alloc(sz);
8247                 rpt->rport_tptid->format_code = 0;
8248                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8249                 srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid;
8250                 /*
8251                  * Convert from "eui.xxxxxxxxxxxxxxxx" to 8-byte binary,
8252                  * skipping the first 4 bytes for the "eui." prefix.
8253                  * The 8-byte initiator-extension part of srp_name is assumed
8254                  * NOT to be stored in the devid and hence is left as zero.
8255                  */
8256                 ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0);
8257                 if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) ||
8258                     !stmf_base16_str_to_binary((char *)devid->ident+4,
8259                     SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name))
8260                         goto devid_to_remote_port_fail;
8261                 break;
8262 
8263         case PROTOCOL_PARALLEL_SCSI:
8264         case PROTOCOL_SSA:
8265         case PROTOCOL_IEEE_1394:
8266         case PROTOCOL_SAS:
8267         case PROTOCOL_ADT:
8268         case PROTOCOL_ATAPI:
8269         default:
8270                 ident_len = devid->ident_length;
8271                 sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) +
8272                     ident_len - 1);
8273                 rpt = stmf_remote_port_alloc(sz);
8274                 rpt->rport_tptid->format_code = 0;
8275                 rpt->rport_tptid->protocol_id = devid->protocol_id;
8276                 dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid;
8277                 SCSI_WRITE16(dflt_tpd->ident_len, ident_len);
8278                 (void) memcpy(dflt_tpd->ident, devid->ident, ident_len);
8279                 break;
8280         }
8281         return (rpt);
8282 
8283 devid_to_remote_port_fail:
8284         stmf_remote_port_free(rpt);
8285         return (NULL);
8286 
8287 }
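
/*
 * Usage sketch (illustrative only, names are hypothetical): a port provider
 * holding the remote initiator's devid (for FC an ident of the form
 * "wwn.xxxxxxxxxxxxxxxx") can build the matching remote port as shown below.
 * The returned structure owns both the stmf_remote_port_t and the embedded
 * TransportID and must eventually be released with stmf_remote_port_free():
 *
 *	stmf_remote_port_t *rpt;
 *
 *	rpt = stmf_scsilib_devid_to_remote_port(remote_devid);
 *	if (rpt == NULL)
 *		return (STMF_FAILURE);
 */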
8288 
8289 stmf_remote_port_t *
8290 stmf_remote_port_alloc(uint16_t tptid_sz)
8291 {
8292         stmf_remote_port_t *rpt;
8293         rpt = (stmf_remote_port_t *)kmem_zalloc(
8294             sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP);
8295         rpt->rport_tptid_sz = tptid_sz;
8296         rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1);
8297         return (rpt);
8298 }
8299 
8300 void
8301 stmf_remote_port_free(stmf_remote_port_t *rpt)
8302 {
8303         /*
8304          * Note: stmf_scsilib_devid_to_remote_port() allocates remote port
8305          *      structures for all transports in the same way, so it is safe
8306          *      to deallocate them in a protocol-independent manner.
8307          *      If any of the allocation methods change, corresponding changes
8308          *      need to be made here too.
8309          */
8310         kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz);
8311 }
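
/*
 * Layout sketch (illustrative only): stmf_remote_port_alloc() makes a single
 * zeroed allocation holding the stmf_remote_port_t header immediately
 * followed by tptid_sz bytes for the TransportID, so rport_tptid simply
 * points just past the header:
 *
 *	+--------------------+-------------------------------------------+
 *	| stmf_remote_port_t | scsi_transport_id_t ...  (tptid_sz bytes) |
 *	+--------------------+-------------------------------------------+
 *	^ rpt                 ^ rpt->rport_tptid == (scsi_transport_id_t *)(rpt + 1)
 */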
8312 
8313 stmf_lu_t *
8314 stmf_check_and_hold_lu(scsi_task_t *task, uint8_t *guid)
8315 {
8316         stmf_i_scsi_session_t *iss;
8317         stmf_lu_t *lu = NULL;   /* remains NULL if no map entry matches guid */
8318         stmf_i_lu_t *ilu = NULL;
8319         stmf_lun_map_t *sm;
8320         stmf_lun_map_ent_t *lme;
8321         int i;
8322 
8323         iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
8324         rw_enter(iss->iss_lockp, RW_READER);
8325         sm = iss->iss_sm;
8326 
8327         for (i = 0; i < sm->lm_nentries; i++) {
8328                 if (sm->lm_plus[i] == NULL)
8329                         continue;
8330                 lme = (stmf_lun_map_ent_t *)sm->lm_plus[i];
8331                 lu = lme->ent_lu;
8332                 if (bcmp(lu->lu_id->ident, guid, 16) == 0) {
8333                         break;
8334                 }
8335                 lu = NULL;
8336         }
8337 
8338         if (!lu) {
8339                 goto hold_lu_done;
8340         }
8341 
8342         ilu = lu->lu_stmf_private;
8343         mutex_enter(&ilu->ilu_task_lock);
8344         ilu->ilu_additional_ref++;
8345         mutex_exit(&ilu->ilu_task_lock);
8346 
8347 hold_lu_done:
8348         rw_exit(iss->iss_lockp);
8349         return (lu);
8350 }
8351 
8352 void
8353 stmf_release_lu(stmf_lu_t *lu)
8354 {
8355         stmf_i_lu_t *ilu;
8356 
8357         ilu = lu->lu_stmf_private;
8358         ASSERT(ilu->ilu_additional_ref != 0);
8359         mutex_enter(&ilu->ilu_task_lock);
8360         ilu->ilu_additional_ref--;
8361         mutex_exit(&ilu->ilu_task_lock);
8362 }
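
/*
 * Usage sketch (illustrative only, names are hypothetical): each successful
 * stmf_check_and_hold_lu() must be balanced by a stmf_release_lu() once the
 * caller is done with the LU, otherwise ilu_additional_ref never drops back
 * to zero and the framework keeps treating the LU as busy:
 *
 *	stmf_lu_t *lu;
 *
 *	if ((lu = stmf_check_and_hold_lu(task, guid)) != NULL) {
 *		do_something_with(lu);
 *		stmf_release_lu(lu);
 *	}
 */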
8363 
8364 int
8365 stmf_is_task_being_aborted(scsi_task_t *task)
8366 {
8367         stmf_i_scsi_task_t *itask;
8368 
8369         itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
8370         if (itask->itask_flags & ITASK_BEING_ABORTED)
8371                 return (1);
8372 
8373         return (0);
8374 }
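
/*
 * Usage sketch (illustrative only): an LU provider in the middle of a
 * long-running operation can poll this between steps and bail out early
 * once the framework has started aborting the task:
 *
 *	if (stmf_is_task_being_aborted(task))
 *		return;
 */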
8375 
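/*
 * Tunable; the description below is an assumption based on how the accessor
 * that follows is consumed by LU providers: when set to B_TRUE, persistent
 * group reservation (PGR) state is saved to the persistent store even for
 * registrations that did not request APTPL.
 */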
8376 volatile boolean_t stmf_pgr_aptpl_always = B_FALSE;
8377 
8378 boolean_t
8379 stmf_is_pgr_aptpl_always(void)
8380 {
8381         return (stmf_pgr_aptpl_always);
8382 }