Print this page
3866 panic in idm module
3867 stmfCreateLu failed: GUID_IN_USE
3868 iscsi target not accepting any new connections
Reviewed by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed by: Jeremy Jones <jeremy@delphix.com>
Reviewed by: Eric Diven <eric.diven@delphix.com>
Reviewed by: Richard Lowe <richlowe@richlowe.net>
Reviewed by: T Nguyen <truongqnguien@gmail.com>
Approved by: Gordon Ross <gwr@nexenta.com>
3862 stmf + kstat = kernel panic
3863 stmf_itl_task_start() must check that ilu->ilu_kstat_io is non-null
3864 memory leak in the iSCSI code
Reviewed by: Adam Leventhal <ahl@delphix.com>
Reviewed by: Jeremy Jones <jeremy@delphix.com>
Reviewed by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed by: Dan McDonald <danmcd@nexenta.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Reviewed by: Richard Elling <richard.elling@gmail.com>
Approved by: Gordon Ross <gwr@nexenta.com>
3621 ZFS LU stuck in the offlining state
Reviewed by: Sebastien Roy <sebastien.roy@delphix.com>
Reviewed by: Jeff Biseda <jeff.biseda@delphix.com>
Reviewed by: Dan McDonald <danmcd@nexenta.com>
Approved by: Christopher Siden <christopher.siden@delphix.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/comstar/stmf/stmf.c
+++ new/usr/src/uts/common/io/comstar/stmf/stmf.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
|
↓ open down ↓ |
15 lines elided |
↑ open up ↑ |
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21 /*
22 22 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
23 23 */
24 24 /*
25 25 * Copyright 2012, Nexenta Systems, Inc. All rights reserved.
26 + * Copyright (c) 2013 by Delphix. All rights reserved.
27 + * Copyright (c) 2013 by Saso Kiselkov. All rights reserved.
26 28 */
27 29
28 30 #include <sys/conf.h>
29 31 #include <sys/file.h>
30 32 #include <sys/ddi.h>
31 33 #include <sys/sunddi.h>
32 34 #include <sys/modctl.h>
33 35 #include <sys/scsi/scsi.h>
34 36 #include <sys/scsi/generic/persist.h>
35 37 #include <sys/scsi/impl/scsi_reset_notify.h>
36 38 #include <sys/disp.h>
37 39 #include <sys/byteorder.h>
38 40 #include <sys/atomic.h>
39 41 #include <sys/ethernet.h>
40 42 #include <sys/sdt.h>
41 43 #include <sys/nvpair.h>
42 44 #include <sys/zone.h>
43 45 #include <sys/id_space.h>
44 46
45 47 #include <sys/stmf.h>
46 48 #include <sys/lpif.h>
47 49 #include <sys/portif.h>
48 50 #include <sys/stmf_ioctl.h>
49 51 #include <sys/pppt_ic_if.h>
50 52
51 53 #include "stmf_impl.h"
52 54 #include "lun_map.h"
53 55 #include "stmf_state.h"
54 56 #include "stmf_stats.h"
55 57
56 58 /*
57 59 * Lock order:
|
↓ open down ↓ |
22 lines elided |
↑ open up ↑ |
58 60 * stmf_state_lock --> ilport_lock/iss_lockp --> ilu_task_lock
59 61 */
60 62
61 63 static uint64_t stmf_session_counter = 0;
62 64 static uint16_t stmf_rtpid_counter = 0;
63 65 /* start messages at 1 */
64 66 static uint64_t stmf_proxy_msg_id = 1;
65 67 #define MSG_ID_TM_BIT 0x8000000000000000
66 68 #define ALIGNED_TO_8BYTE_BOUNDARY(i) (((i) + 7) & ~7)
67 69
70 +/*
71 + * When stmf_io_deadman_enabled is set to B_TRUE, we check that finishing up
72 + * I/O operations on an offlining LU doesn't take longer than stmf_io_deadman
73 + * seconds. If it does, we trigger a panic to inform the user of hung I/O
74 + * blocking us for too long.
75 + */
76 +boolean_t stmf_io_deadman_enabled = B_TRUE;
77 +int stmf_io_deadman = 1000; /* seconds */
78 +
68 79 struct stmf_svc_clocks;
69 80
70 81 static int stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
71 82 static int stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
72 83 static int stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
73 84 void **result);
74 85 static int stmf_open(dev_t *devp, int flag, int otype, cred_t *credp);
75 86 static int stmf_close(dev_t dev, int flag, int otype, cred_t *credp);
76 87 static int stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
77 88 cred_t *credp, int *rval);
78 89 static int stmf_get_stmf_state(stmf_state_desc_t *std);
79 90 static int stmf_set_stmf_state(stmf_state_desc_t *std);
80 91 static void stmf_abort_task_offline(scsi_task_t *task, int offline_lu,
81 92 char *info);
82 93 static int stmf_set_alua_state(stmf_alua_state_desc_t *alua_state);
83 94 static void stmf_get_alua_state(stmf_alua_state_desc_t *alua_state);
|
↓ open down ↓ |
6 lines elided |
↑ open up ↑ |
84 95
85 96 static void stmf_task_audit(stmf_i_scsi_task_t *itask,
86 97 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf);
87 98
88 99 static boolean_t stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp);
89 100 static char stmf_ctoi(char c);
90 101 stmf_xfer_data_t *stmf_prepare_tpgs_data(uint8_t ilu_alua);
91 102 void stmf_svc_init();
92 103 stmf_status_t stmf_svc_fini();
93 104 void stmf_svc(void *arg);
105 +static void stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu);
94 106 void stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info);
95 107 static void stmf_svc_kill_obj_requests(void *obj);
96 108 static void stmf_svc_timeout(struct stmf_svc_clocks *);
97 109 void stmf_check_freetask();
98 110 void stmf_abort_target_reset(scsi_task_t *task);
99 111 stmf_status_t stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task,
100 112 int target_reset);
101 113 void stmf_target_reset_poll(struct scsi_task *task);
102 114 void stmf_handle_lun_reset(scsi_task_t *task);
103 115 void stmf_handle_target_reset(scsi_task_t *task);
104 116 void stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off);
105 117 int stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
106 118 uint32_t *err_ret);
107 119 int stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi);
108 120 int stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
109 121 uint32_t *err_ret);
110 122 void stmf_delete_ppd(stmf_pp_data_t *ppd);
111 123 void stmf_delete_all_ppds();
112 124 void stmf_trace_clear();
113 125 void stmf_worker_init();
114 126 stmf_status_t stmf_worker_fini();
115 127 void stmf_worker_mgmt();
116 128 void stmf_worker_task(void *arg);
117 129 static void stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss);
118 130 static stmf_status_t stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg,
119 131 uint32_t type);
120 132 static stmf_status_t stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg);
121 133 static stmf_status_t stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg);
122 134 static stmf_status_t stmf_ic_rx_status(stmf_ic_status_msg_t *msg);
123 135 static stmf_status_t stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg);
124 136 void stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s);
125 137
126 138 /* pppt modhandle */
127 139 ddi_modhandle_t pppt_mod;
128 140
129 141 /* pppt modload imported functions */
130 142 stmf_ic_reg_port_msg_alloc_func_t ic_reg_port_msg_alloc;
131 143 stmf_ic_dereg_port_msg_alloc_func_t ic_dereg_port_msg_alloc;
132 144 stmf_ic_reg_lun_msg_alloc_func_t ic_reg_lun_msg_alloc;
133 145 stmf_ic_dereg_lun_msg_alloc_func_t ic_dereg_lun_msg_alloc;
134 146 stmf_ic_lun_active_msg_alloc_func_t ic_lun_active_msg_alloc;
135 147 stmf_ic_scsi_cmd_msg_alloc_func_t ic_scsi_cmd_msg_alloc;
136 148 stmf_ic_scsi_data_xfer_done_msg_alloc_func_t ic_scsi_data_xfer_done_msg_alloc;
137 149 stmf_ic_session_create_msg_alloc_func_t ic_session_reg_msg_alloc;
138 150 stmf_ic_session_destroy_msg_alloc_func_t ic_session_dereg_msg_alloc;
139 151 stmf_ic_tx_msg_func_t ic_tx_msg;
140 152 stmf_ic_msg_free_func_t ic_msg_free;
141 153
142 154 static void stmf_itl_task_start(stmf_i_scsi_task_t *itask);
143 155 static void stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask);
144 156 static void stmf_itl_task_done(stmf_i_scsi_task_t *itask);
145 157
146 158 static void stmf_lport_xfer_start(stmf_i_scsi_task_t *itask,
147 159 stmf_data_buf_t *dbuf);
148 160 static void stmf_lport_xfer_done(stmf_i_scsi_task_t *itask,
149 161 stmf_data_buf_t *dbuf);
150 162
151 163 static void stmf_update_kstat_lu_q(scsi_task_t *, void());
152 164 static void stmf_update_kstat_lport_q(scsi_task_t *, void());
153 165 static void stmf_update_kstat_lu_io(scsi_task_t *, stmf_data_buf_t *);
154 166 static void stmf_update_kstat_lport_io(scsi_task_t *, stmf_data_buf_t *);
155 167
|
↓ open down ↓ |
52 lines elided |
↑ open up ↑ |
156 168 static int stmf_irport_compare(const void *void_irport1,
157 169 const void *void_irport2);
158 170 static stmf_i_remote_port_t *stmf_irport_create(scsi_devid_desc_t *rport_devid);
159 171 static void stmf_irport_destroy(stmf_i_remote_port_t *irport);
160 172 static stmf_i_remote_port_t *stmf_irport_register(
161 173 scsi_devid_desc_t *rport_devid);
162 174 static stmf_i_remote_port_t *stmf_irport_lookup_locked(
163 175 scsi_devid_desc_t *rport_devid);
164 176 static void stmf_irport_deregister(stmf_i_remote_port_t *irport);
165 177
166 -static void stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks);
167 -static void stmf_delete_itl_kstat_by_lport(char *);
168 -static void stmf_delete_itl_kstat_by_guid(char *);
169 -static int stmf_itl_kstat_compare(const void*, const void*);
170 -static stmf_i_itl_kstat_t *stmf_itl_kstat_lookup(char *kstat_nm);
171 -static stmf_i_itl_kstat_t *stmf_itl_kstat_create(stmf_itl_data_t *itl,
172 - char *nm, scsi_devid_desc_t *lport, scsi_devid_desc_t *lun);
173 -
174 178 extern struct mod_ops mod_driverops;
175 179
176 180 /* =====[ Tunables ]===== */
177 181 /* Internal tracing */
178 182 volatile int stmf_trace_on = 1;
179 183 volatile int stmf_trace_buf_size = (1 * 1024 * 1024);
180 184 /*
181 185 * The reason default task timeout is 75 is because we want the
182 186 * host to timeout 1st and mostly host timeout is 60 seconds.
183 187 */
184 188 volatile int stmf_default_task_timeout = 75;
185 189 /*
186 190 * Setting this to one means, you are responsible for config load and keeping
187 191 * things in sync with persistent database.
188 192 */
189 193 volatile int stmf_allow_modunload = 0;
190 194
191 195 volatile int stmf_max_nworkers = 256;
192 196 volatile int stmf_min_nworkers = 4;
193 197 volatile int stmf_worker_scale_down_delay = 20;
194 198
195 199 /* === [ Debugging and fault injection ] === */
196 200 #ifdef DEBUG
197 201 volatile int stmf_drop_task_counter = 0;
198 202 volatile int stmf_drop_buf_counter = 0;
199 203
200 204 #endif
201 205
202 206 stmf_state_t stmf_state;
203 207 static stmf_lu_t *dlun0;
204 208
205 209 static uint8_t stmf_first_zero[] =
206 210 { 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0, 0xff };
207 211 static uint8_t stmf_first_one[] =
208 212 { 0xff, 0, 1, 0, 2, 0, 1, 0, 3, 0, 1, 0, 2, 0, 1, 0 };
209 213
210 214 static kmutex_t trace_buf_lock;
211 215 static int trace_buf_size;
212 216 static int trace_buf_curndx;
213 217 caddr_t stmf_trace_buf;
214 218
215 219 static enum {
216 220 STMF_WORKERS_DISABLED = 0,
217 221 STMF_WORKERS_ENABLING,
218 222 STMF_WORKERS_ENABLED
219 223 } stmf_workers_state = STMF_WORKERS_DISABLED;
220 224 static int stmf_i_max_nworkers;
221 225 static int stmf_i_min_nworkers;
222 226 static int stmf_nworkers_cur; /* # of workers currently running */
223 227 static int stmf_nworkers_needed; /* # of workers need to be running */
224 228 static int stmf_worker_sel_counter = 0;
225 229 static uint32_t stmf_cur_ntasks = 0;
226 230 static clock_t stmf_wm_last = 0;
227 231 /*
228 232 * This is equal to stmf_nworkers_cur while we are increasing # workers and
229 233 * stmf_nworkers_needed while we are decreasing the worker count.
230 234 */
231 235 static int stmf_nworkers_accepting_cmds;
232 236 static stmf_worker_t *stmf_workers = NULL;
233 237 static clock_t stmf_worker_mgmt_delay = 2;
234 238 static clock_t stmf_worker_scale_down_timer = 0;
235 239 static int stmf_worker_scale_down_qd = 0;
236 240
237 241 static struct cb_ops stmf_cb_ops = {
238 242 stmf_open, /* open */
239 243 stmf_close, /* close */
240 244 nodev, /* strategy */
241 245 nodev, /* print */
242 246 nodev, /* dump */
243 247 nodev, /* read */
244 248 nodev, /* write */
245 249 stmf_ioctl, /* ioctl */
246 250 nodev, /* devmap */
247 251 nodev, /* mmap */
248 252 nodev, /* segmap */
249 253 nochpoll, /* chpoll */
250 254 ddi_prop_op, /* cb_prop_op */
251 255 0, /* streamtab */
252 256 D_NEW | D_MP, /* cb_flag */
253 257 CB_REV, /* rev */
254 258 nodev, /* aread */
255 259 nodev /* awrite */
256 260 };
257 261
258 262 static struct dev_ops stmf_ops = {
259 263 DEVO_REV,
260 264 0,
261 265 stmf_getinfo,
262 266 nulldev, /* identify */
263 267 nulldev, /* probe */
264 268 stmf_attach,
265 269 stmf_detach,
266 270 nodev, /* reset */
267 271 &stmf_cb_ops,
268 272 NULL, /* bus_ops */
269 273 NULL /* power */
270 274 };
271 275
272 276 #define STMF_NAME "COMSTAR STMF"
273 277 #define STMF_MODULE_NAME "stmf"
274 278
275 279 static struct modldrv modldrv = {
276 280 &mod_driverops,
277 281 STMF_NAME,
278 282 &stmf_ops
279 283 };
280 284
281 285 static struct modlinkage modlinkage = {
282 286 MODREV_1,
283 287 &modldrv,
284 288 NULL
285 289 };
286 290
/*
 * _init: kernel module load entry point for the STMF driver.
 * Installs the module linkage, then initializes global state: the
 * internal trace buffer, the stmf_state structure (service off; default
 * LU and local-port states online), its lock and CV, the remote-port
 * AVL tree, the lport/rport instance-number ID spaces, and the view,
 * service and dlun subsystems.  Returns 0 or the mod_install() error.
 */
287 291 int
288 292 _init(void)
289 293 {
290 294 	int ret;
291 295 
292 296 	ret = mod_install(&modlinkage);
293 297 	if (ret)
294 298 		return (ret);
	/*
	 * NOTE(review): globals are initialized only after mod_install()
	 * succeeds -- presumably nothing can reach the driver before
	 * attach, but worth confirming.
	 */
295 299 	stmf_trace_buf = kmem_zalloc(stmf_trace_buf_size, KM_SLEEP);
296 300 	trace_buf_size = stmf_trace_buf_size;
297 301 	trace_buf_curndx = 0;
298 302 	mutex_init(&trace_buf_lock, NULL, MUTEX_DRIVER, 0);
299 303 	bzero(&stmf_state, sizeof (stmf_state_t));
300 304 	/* STMF service is off by default */
301 305 	stmf_state.stmf_service_running = 0;
302 306 	/* default lu/lport states are online */
303 307 	stmf_state.stmf_default_lu_state = STMF_STATE_ONLINE;
304 308 	stmf_state.stmf_default_lport_state = STMF_STATE_ONLINE;
|
↓ open down ↓ |
121 lines elided |
↑ open up ↑ |
305 309 	mutex_init(&stmf_state.stmf_lock, NULL, MUTEX_DRIVER, NULL);
306 310 	cv_init(&stmf_state.stmf_cv, NULL, CV_DRIVER, NULL);
	/* seed the session counter from the current lbolt value */
307 311 	stmf_session_counter = (uint64_t)ddi_get_lbolt();
308 312 	avl_create(&stmf_state.stmf_irportlist,
309 313 	    stmf_irport_compare, sizeof (stmf_i_remote_port_t),
310 314 	    offsetof(stmf_i_remote_port_t, irport_ln));
311 315 	stmf_state.stmf_ilport_inst_space =
312 316 	    id_space_create("lport-instances", 0, MAX_ILPORT);
313 317 	stmf_state.stmf_irport_inst_space =
314 318 	    id_space_create("rport-instances", 0, MAX_IRPORT);
315 - 	avl_create(&stmf_state.stmf_itl_kstat_list,
316 - 	    stmf_itl_kstat_compare, sizeof (stmf_i_itl_kstat_t),
317 - 	    offsetof(stmf_i_itl_kstat_t, iitl_kstat_ln));
318 319 	stmf_view_init();
319 320 	stmf_svc_init();
320 321 	stmf_dlun_init();
321 322 	return (ret);
322 323 }
323 324
/*
 * _fini: kernel module unload entry point.
 * Refuses to unload (EBUSY) while the service is running, while a
 * configuration is loaded (unless stmf_allow_modunload is set), while
 * LU or port providers are still registered, or while the dlun, worker
 * or service subsystems cannot be shut down.  If mod_remove() fails,
 * the already-stopped subsystems are restarted.  On success the
 * remaining global state is torn down roughly in reverse of _init().
 */
324 325 int
325 326 _fini(void)
326 327 {
327 328 	int ret;
328 329 	stmf_i_remote_port_t *irport;
329 -	stmf_i_itl_kstat_t *ks_itl;
330 330 	void *avl_dest_cookie = NULL;
331 331 
332 332 	if (stmf_state.stmf_service_running)
333 333 		return (EBUSY);
334 334 	if ((!stmf_allow_modunload) &&
335 335 	    (stmf_state.stmf_config_state != STMF_CONFIG_NONE)) {
336 336 		return (EBUSY);
337 337 	}
	/* still-registered LU providers or port providers block unload */
338 338 	if (stmf_state.stmf_nlps || stmf_state.stmf_npps) {
339 339 		return (EBUSY);
340 340 	}
341 341 	if (stmf_dlun_fini() != STMF_SUCCESS)
342 342 		return (EBUSY);
343 343 	if (stmf_worker_fini() != STMF_SUCCESS) {
344 344 		stmf_dlun_init();
345 345 		return (EBUSY);
346 346 	}
347 347 	if (stmf_svc_fini() != STMF_SUCCESS) {
348 348 		stmf_dlun_init();
349 349 		stmf_worker_init();
350 350 		return (EBUSY);
351 351 	}
352 352 
353 353 	ret = mod_remove(&modlinkage);
354 354 	if (ret) {
	/* mod_remove failed: restart everything we already stopped */
355 355 		stmf_svc_init();
356 356 		stmf_dlun_init();
357 357 		stmf_worker_init();
358 358 		return (ret);
359 359 	}
|
↓ open down ↓ |
20 lines elided |
↑ open up ↑ |
360 360 
361 361 	stmf_view_clear_config();
362 362 
	/* destroy every remote-port node, then the tree itself */
363 363 	while ((irport = avl_destroy_nodes(&stmf_state.stmf_irportlist,
364 364 	    &avl_dest_cookie)) != NULL)
365 365 		stmf_irport_destroy(irport);
366 366 	avl_destroy(&stmf_state.stmf_irportlist);
367 367 	id_space_destroy(stmf_state.stmf_ilport_inst_space);
368 368 	id_space_destroy(stmf_state.stmf_irport_inst_space);
369 369 
	/*
	 * NOTE(review): the removed block below freed each node with
	 * sizeof (ks_itl) -- the size of the POINTER, not the structure.
	 * That pre-existing bug disappears along with the per-ITL kstat
	 * list on the new side of this diff.
	 */
370 -	avl_dest_cookie = NULL;
371 -	while ((ks_itl = avl_destroy_nodes(&stmf_state.stmf_itl_kstat_list,
372 -	    &avl_dest_cookie)) != NULL) {
373 -		stmf_teardown_itl_kstats(ks_itl);
374 -		kmem_free(ks_itl, sizeof (ks_itl));
375 -	}
376 -	avl_destroy(&stmf_state.stmf_itl_kstat_list);
377 -
378 370 	kmem_free(stmf_trace_buf, stmf_trace_buf_size);
379 371 	mutex_destroy(&trace_buf_lock);
380 372 	mutex_destroy(&stmf_state.stmf_lock);
381 373 	cv_destroy(&stmf_state.stmf_cv);
382 374 	return (ret);
383 375 }
384 376
385 377 int
386 378 _info(struct modinfo *modinfop)
387 379 {
388 380 return (mod_info(&modlinkage, modinfop));
389 381 }
390 382
391 383 /* ARGSUSED */
392 384 static int
393 385 stmf_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **result)
394 386 {
395 387 switch (cmd) {
396 388 case DDI_INFO_DEVT2DEVINFO:
397 389 *result = stmf_state.stmf_dip;
398 390 break;
399 391 case DDI_INFO_DEVT2INSTANCE:
400 392 *result =
401 393 (void *)(uintptr_t)ddi_get_instance(stmf_state.stmf_dip);
402 394 break;
403 395 default:
404 396 return (DDI_FAILURE);
405 397 }
406 398
407 399 return (DDI_SUCCESS);
408 400 }
409 401
410 402 static int
411 403 stmf_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
412 404 {
413 405 switch (cmd) {
414 406 case DDI_ATTACH:
415 407 stmf_state.stmf_dip = dip;
416 408
417 409 if (ddi_create_minor_node(dip, "admin", S_IFCHR, 0,
418 410 DDI_NT_STMF, 0) != DDI_SUCCESS) {
419 411 break;
420 412 }
421 413 ddi_report_dev(dip);
422 414 return (DDI_SUCCESS);
423 415 }
424 416
425 417 return (DDI_FAILURE);
426 418 }
427 419
428 420 static int
429 421 stmf_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
430 422 {
431 423 switch (cmd) {
432 424 case DDI_DETACH:
433 425 ddi_remove_minor_node(dip, 0);
434 426 return (DDI_SUCCESS);
435 427 }
436 428
437 429 return (DDI_FAILURE);
438 430 }
439 431
440 432 /* ARGSUSED */
441 433 static int
442 434 stmf_open(dev_t *devp, int flag, int otype, cred_t *credp)
443 435 {
444 436 mutex_enter(&stmf_state.stmf_lock);
445 437 if (stmf_state.stmf_exclusive_open) {
446 438 mutex_exit(&stmf_state.stmf_lock);
447 439 return (EBUSY);
448 440 }
449 441 if (flag & FEXCL) {
450 442 if (stmf_state.stmf_opened) {
451 443 mutex_exit(&stmf_state.stmf_lock);
452 444 return (EBUSY);
453 445 }
454 446 stmf_state.stmf_exclusive_open = 1;
455 447 }
456 448 stmf_state.stmf_opened = 1;
457 449 mutex_exit(&stmf_state.stmf_lock);
458 450 return (0);
459 451 }
460 452
461 453 /* ARGSUSED */
462 454 static int
463 455 stmf_close(dev_t dev, int flag, int otype, cred_t *credp)
464 456 {
465 457 mutex_enter(&stmf_state.stmf_lock);
466 458 stmf_state.stmf_opened = 0;
467 459 if (stmf_state.stmf_exclusive_open &&
468 460 (stmf_state.stmf_config_state != STMF_CONFIG_INIT_DONE)) {
469 461 stmf_state.stmf_config_state = STMF_CONFIG_NONE;
470 462 stmf_delete_all_ppds();
471 463 stmf_view_clear_config();
472 464 stmf_view_init();
473 465 }
474 466 stmf_state.stmf_exclusive_open = 0;
475 467 mutex_exit(&stmf_state.stmf_lock);
476 468 return (0);
477 469 }
478 470
479 471 int
480 472 stmf_copyin_iocdata(intptr_t data, int mode, stmf_iocdata_t **iocd,
481 473 void **ibuf, void **obuf)
482 474 {
483 475 int ret;
484 476
485 477 *ibuf = NULL;
486 478 *obuf = NULL;
487 479 *iocd = kmem_zalloc(sizeof (stmf_iocdata_t), KM_SLEEP);
488 480
489 481 ret = ddi_copyin((void *)data, *iocd, sizeof (stmf_iocdata_t), mode);
490 482 if (ret)
491 483 return (EFAULT);
492 484 if ((*iocd)->stmf_version != STMF_VERSION_1) {
493 485 ret = EINVAL;
494 486 goto copyin_iocdata_done;
495 487 }
496 488 if ((*iocd)->stmf_ibuf_size) {
497 489 *ibuf = kmem_zalloc((*iocd)->stmf_ibuf_size, KM_SLEEP);
498 490 ret = ddi_copyin((void *)((unsigned long)(*iocd)->stmf_ibuf),
499 491 *ibuf, (*iocd)->stmf_ibuf_size, mode);
500 492 }
501 493 if ((*iocd)->stmf_obuf_size)
502 494 *obuf = kmem_zalloc((*iocd)->stmf_obuf_size, KM_SLEEP);
503 495
504 496 if (ret == 0)
505 497 return (0);
506 498 ret = EFAULT;
507 499 copyin_iocdata_done:;
508 500 if (*obuf) {
509 501 kmem_free(*obuf, (*iocd)->stmf_obuf_size);
510 502 *obuf = NULL;
511 503 }
512 504 if (*ibuf) {
513 505 kmem_free(*ibuf, (*iocd)->stmf_ibuf_size);
514 506 *ibuf = NULL;
515 507 }
516 508 kmem_free(*iocd, sizeof (stmf_iocdata_t));
517 509 return (ret);
518 510 }
519 511
520 512 int
521 513 stmf_copyout_iocdata(intptr_t data, int mode, stmf_iocdata_t *iocd, void *obuf)
522 514 {
523 515 int ret;
524 516
525 517 if (iocd->stmf_obuf_size) {
526 518 ret = ddi_copyout(obuf, (void *)(unsigned long)iocd->stmf_obuf,
527 519 iocd->stmf_obuf_size, mode);
528 520 if (ret)
529 521 return (EFAULT);
530 522 }
531 523 ret = ddi_copyout(iocd, (void *)data, sizeof (stmf_iocdata_t), mode);
532 524 if (ret)
533 525 return (EFAULT);
534 526 return (0);
535 527 }
536 528
537 529 /* ARGSUSED */
538 530 static int
539 531 stmf_ioctl(dev_t dev, int cmd, intptr_t data, int mode,
540 532 cred_t *credp, int *rval)
541 533 {
542 534 stmf_iocdata_t *iocd;
543 535 void *ibuf = NULL, *obuf = NULL;
544 536 slist_lu_t *luid_list;
545 537 slist_target_port_t *lportid_list;
546 538 stmf_i_lu_t *ilu;
547 539 stmf_i_local_port_t *ilport;
548 540 stmf_i_scsi_session_t *iss;
549 541 slist_scsi_session_t *iss_list;
550 542 sioc_lu_props_t *lup;
551 543 sioc_target_port_props_t *lportp;
552 544 stmf_ppioctl_data_t *ppi, *ppi_out = NULL;
553 545 uint64_t *ppi_token = NULL;
554 546 uint8_t *p_id, *id;
555 547 stmf_state_desc_t *std;
556 548 stmf_status_t ctl_ret;
557 549 stmf_state_change_info_t ssi;
558 550 int ret = 0;
559 551 uint32_t n;
560 552 int i;
561 553 stmf_group_op_data_t *grp_entry;
562 554 stmf_group_name_t *grpname;
563 555 stmf_view_op_entry_t *ve;
564 556 stmf_id_type_t idtype;
565 557 stmf_id_data_t *id_entry;
566 558 stmf_id_list_t *id_list;
567 559 stmf_view_entry_t *view_entry;
568 560 stmf_set_props_t *stmf_set_props;
569 561 uint32_t veid;
570 562 if ((cmd & 0xff000000) != STMF_IOCTL) {
571 563 return (ENOTTY);
572 564 }
573 565
574 566 if (drv_priv(credp) != 0) {
575 567 return (EPERM);
576 568 }
577 569
578 570 ret = stmf_copyin_iocdata(data, mode, &iocd, &ibuf, &obuf);
579 571 if (ret)
580 572 return (ret);
581 573 iocd->stmf_error = 0;
582 574
583 575 switch (cmd) {
584 576 case STMF_IOCTL_LU_LIST:
585 577 /* retrieves both registered/unregistered */
586 578 mutex_enter(&stmf_state.stmf_lock);
587 579 id_list = &stmf_state.stmf_luid_list;
588 580 n = min(id_list->id_count,
589 581 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
590 582 iocd->stmf_obuf_max_nentries = id_list->id_count;
591 583 luid_list = (slist_lu_t *)obuf;
592 584 id_entry = id_list->idl_head;
593 585 for (i = 0; i < n; i++) {
594 586 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
595 587 id_entry = id_entry->id_next;
596 588 }
597 589
598 590 n = iocd->stmf_obuf_size/sizeof (slist_lu_t);
599 591 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
600 592 id = (uint8_t *)ilu->ilu_lu->lu_id;
601 593 if (stmf_lookup_id(id_list, 16, id + 4) == NULL) {
602 594 iocd->stmf_obuf_max_nentries++;
603 595 if (i < n) {
604 596 bcopy(id + 4, luid_list[i].lu_guid,
605 597 sizeof (slist_lu_t));
606 598 i++;
607 599 }
608 600 }
609 601 }
610 602 iocd->stmf_obuf_nentries = i;
611 603 mutex_exit(&stmf_state.stmf_lock);
612 604 break;
613 605
614 606 case STMF_IOCTL_REG_LU_LIST:
615 607 mutex_enter(&stmf_state.stmf_lock);
616 608 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlus;
617 609 n = min(stmf_state.stmf_nlus,
618 610 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
619 611 iocd->stmf_obuf_nentries = n;
620 612 ilu = stmf_state.stmf_ilulist;
621 613 luid_list = (slist_lu_t *)obuf;
622 614 for (i = 0; i < n; i++) {
623 615 uint8_t *id;
624 616 id = (uint8_t *)ilu->ilu_lu->lu_id;
625 617 bcopy(id + 4, luid_list[i].lu_guid, 16);
626 618 ilu = ilu->ilu_next;
627 619 }
628 620 mutex_exit(&stmf_state.stmf_lock);
629 621 break;
630 622
631 623 case STMF_IOCTL_VE_LU_LIST:
632 624 mutex_enter(&stmf_state.stmf_lock);
633 625 id_list = &stmf_state.stmf_luid_list;
634 626 n = min(id_list->id_count,
635 627 (iocd->stmf_obuf_size)/sizeof (slist_lu_t));
636 628 iocd->stmf_obuf_max_nentries = id_list->id_count;
637 629 iocd->stmf_obuf_nentries = n;
638 630 luid_list = (slist_lu_t *)obuf;
639 631 id_entry = id_list->idl_head;
640 632 for (i = 0; i < n; i++) {
641 633 bcopy(id_entry->id_data, luid_list[i].lu_guid, 16);
642 634 id_entry = id_entry->id_next;
643 635 }
644 636 mutex_exit(&stmf_state.stmf_lock);
645 637 break;
646 638
647 639 case STMF_IOCTL_TARGET_PORT_LIST:
648 640 mutex_enter(&stmf_state.stmf_lock);
649 641 iocd->stmf_obuf_max_nentries = stmf_state.stmf_nlports;
650 642 n = min(stmf_state.stmf_nlports,
651 643 (iocd->stmf_obuf_size)/sizeof (slist_target_port_t));
652 644 iocd->stmf_obuf_nentries = n;
653 645 ilport = stmf_state.stmf_ilportlist;
654 646 lportid_list = (slist_target_port_t *)obuf;
655 647 for (i = 0; i < n; i++) {
656 648 uint8_t *id;
657 649 id = (uint8_t *)ilport->ilport_lport->lport_id;
658 650 bcopy(id, lportid_list[i].target, id[3] + 4);
659 651 ilport = ilport->ilport_next;
660 652 }
661 653 mutex_exit(&stmf_state.stmf_lock);
662 654 break;
663 655
664 656 case STMF_IOCTL_SESSION_LIST:
665 657 p_id = (uint8_t *)ibuf;
666 658 if ((p_id == NULL) || (iocd->stmf_ibuf_size < 4) ||
667 659 (iocd->stmf_ibuf_size < (p_id[3] + 4))) {
668 660 ret = EINVAL;
669 661 break;
670 662 }
671 663 mutex_enter(&stmf_state.stmf_lock);
672 664 for (ilport = stmf_state.stmf_ilportlist; ilport; ilport =
673 665 ilport->ilport_next) {
674 666 uint8_t *id;
675 667 id = (uint8_t *)ilport->ilport_lport->lport_id;
676 668 if ((p_id[3] == id[3]) &&
677 669 (bcmp(p_id + 4, id + 4, id[3]) == 0)) {
678 670 break;
679 671 }
680 672 }
681 673 if (ilport == NULL) {
682 674 mutex_exit(&stmf_state.stmf_lock);
683 675 ret = ENOENT;
684 676 break;
685 677 }
686 678 iocd->stmf_obuf_max_nentries = ilport->ilport_nsessions;
687 679 n = min(ilport->ilport_nsessions,
688 680 (iocd->stmf_obuf_size)/sizeof (slist_scsi_session_t));
689 681 iocd->stmf_obuf_nentries = n;
690 682 iss = ilport->ilport_ss_list;
691 683 iss_list = (slist_scsi_session_t *)obuf;
692 684 for (i = 0; i < n; i++) {
693 685 uint8_t *id;
694 686 id = (uint8_t *)iss->iss_ss->ss_rport_id;
695 687 bcopy(id, iss_list[i].initiator, id[3] + 4);
696 688 iss_list[i].creation_time = (uint32_t)
697 689 iss->iss_creation_time;
698 690 if (iss->iss_ss->ss_rport_alias) {
699 691 (void) strncpy(iss_list[i].alias,
700 692 iss->iss_ss->ss_rport_alias, 255);
701 693 iss_list[i].alias[255] = 0;
702 694 } else {
703 695 iss_list[i].alias[0] = 0;
704 696 }
705 697 iss = iss->iss_next;
706 698 }
707 699 mutex_exit(&stmf_state.stmf_lock);
708 700 break;
709 701
710 702 case STMF_IOCTL_GET_LU_PROPERTIES:
711 703 p_id = (uint8_t *)ibuf;
712 704 if ((iocd->stmf_ibuf_size < 16) ||
713 705 (iocd->stmf_obuf_size < sizeof (sioc_lu_props_t)) ||
714 706 (p_id[0] == 0)) {
715 707 ret = EINVAL;
716 708 break;
717 709 }
718 710 mutex_enter(&stmf_state.stmf_lock);
719 711 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
720 712 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
721 713 break;
722 714 }
723 715 if (ilu == NULL) {
724 716 mutex_exit(&stmf_state.stmf_lock);
725 717 ret = ENOENT;
726 718 break;
727 719 }
728 720 lup = (sioc_lu_props_t *)obuf;
729 721 bcopy(ilu->ilu_lu->lu_id->ident, lup->lu_guid, 16);
730 722 lup->lu_state = ilu->ilu_state & 0x0f;
731 723 lup->lu_present = 1; /* XXX */
732 724 (void) strncpy(lup->lu_provider_name,
733 725 ilu->ilu_lu->lu_lp->lp_name, 255);
734 726 lup->lu_provider_name[254] = 0;
735 727 if (ilu->ilu_lu->lu_alias) {
736 728 (void) strncpy(lup->lu_alias,
737 729 ilu->ilu_lu->lu_alias, 255);
738 730 lup->lu_alias[255] = 0;
739 731 } else {
740 732 lup->lu_alias[0] = 0;
741 733 }
742 734 mutex_exit(&stmf_state.stmf_lock);
743 735 break;
744 736
745 737 case STMF_IOCTL_GET_TARGET_PORT_PROPERTIES:
746 738 p_id = (uint8_t *)ibuf;
747 739 if ((p_id == NULL) ||
748 740 (iocd->stmf_ibuf_size < (p_id[3] + 4)) ||
749 741 (iocd->stmf_obuf_size <
750 742 sizeof (sioc_target_port_props_t))) {
751 743 ret = EINVAL;
752 744 break;
753 745 }
754 746 mutex_enter(&stmf_state.stmf_lock);
755 747 for (ilport = stmf_state.stmf_ilportlist; ilport;
756 748 ilport = ilport->ilport_next) {
757 749 uint8_t *id;
758 750 id = (uint8_t *)ilport->ilport_lport->lport_id;
759 751 if ((p_id[3] == id[3]) &&
760 752 (bcmp(p_id+4, id+4, id[3]) == 0))
761 753 break;
762 754 }
763 755 if (ilport == NULL) {
764 756 mutex_exit(&stmf_state.stmf_lock);
765 757 ret = ENOENT;
766 758 break;
767 759 }
768 760 lportp = (sioc_target_port_props_t *)obuf;
769 761 bcopy(ilport->ilport_lport->lport_id, lportp->tgt_id,
770 762 ilport->ilport_lport->lport_id->ident_length + 4);
771 763 lportp->tgt_state = ilport->ilport_state & 0x0f;
772 764 lportp->tgt_present = 1; /* XXX */
773 765 (void) strncpy(lportp->tgt_provider_name,
774 766 ilport->ilport_lport->lport_pp->pp_name, 255);
775 767 lportp->tgt_provider_name[254] = 0;
776 768 if (ilport->ilport_lport->lport_alias) {
777 769 (void) strncpy(lportp->tgt_alias,
778 770 ilport->ilport_lport->lport_alias, 255);
779 771 lportp->tgt_alias[255] = 0;
780 772 } else {
781 773 lportp->tgt_alias[0] = 0;
782 774 }
783 775 mutex_exit(&stmf_state.stmf_lock);
784 776 break;
785 777
786 778 case STMF_IOCTL_SET_STMF_STATE:
787 779 if ((ibuf == NULL) ||
788 780 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
789 781 ret = EINVAL;
790 782 break;
791 783 }
792 784 ret = stmf_set_stmf_state((stmf_state_desc_t *)ibuf);
793 785 break;
794 786
795 787 case STMF_IOCTL_GET_STMF_STATE:
796 788 if ((obuf == NULL) ||
797 789 (iocd->stmf_obuf_size < sizeof (stmf_state_desc_t))) {
798 790 ret = EINVAL;
799 791 break;
800 792 }
801 793 ret = stmf_get_stmf_state((stmf_state_desc_t *)obuf);
802 794 break;
803 795
804 796 case STMF_IOCTL_SET_ALUA_STATE:
805 797 if ((ibuf == NULL) ||
806 798 (iocd->stmf_ibuf_size < sizeof (stmf_alua_state_desc_t))) {
807 799 ret = EINVAL;
808 800 break;
809 801 }
810 802 ret = stmf_set_alua_state((stmf_alua_state_desc_t *)ibuf);
811 803 break;
812 804
813 805 case STMF_IOCTL_GET_ALUA_STATE:
814 806 if ((obuf == NULL) ||
815 807 (iocd->stmf_obuf_size < sizeof (stmf_alua_state_desc_t))) {
816 808 ret = EINVAL;
817 809 break;
818 810 }
819 811 stmf_get_alua_state((stmf_alua_state_desc_t *)obuf);
820 812 break;
821 813
822 814 case STMF_IOCTL_SET_LU_STATE:
823 815 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
824 816 ssi.st_additional_info = NULL;
825 817 std = (stmf_state_desc_t *)ibuf;
826 818 if ((ibuf == NULL) ||
827 819 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
828 820 ret = EINVAL;
829 821 break;
830 822 }
831 823 p_id = std->ident;
832 824 mutex_enter(&stmf_state.stmf_lock);
833 825 if (stmf_state.stmf_inventory_locked) {
834 826 mutex_exit(&stmf_state.stmf_lock);
835 827 ret = EBUSY;
836 828 break;
837 829 }
838 830 for (ilu = stmf_state.stmf_ilulist; ilu; ilu = ilu->ilu_next) {
839 831 if (bcmp(p_id, ilu->ilu_lu->lu_id->ident, 16) == 0)
840 832 break;
841 833 }
842 834 if (ilu == NULL) {
843 835 mutex_exit(&stmf_state.stmf_lock);
844 836 ret = ENOENT;
845 837 break;
846 838 }
847 839 stmf_state.stmf_inventory_locked = 1;
848 840 mutex_exit(&stmf_state.stmf_lock);
849 841 cmd = (std->state == STMF_STATE_ONLINE) ? STMF_CMD_LU_ONLINE :
850 842 STMF_CMD_LU_OFFLINE;
851 843 ctl_ret = stmf_ctl(cmd, (void *)ilu->ilu_lu, &ssi);
852 844 if (ctl_ret == STMF_ALREADY)
853 845 ret = 0;
854 846 else if (ctl_ret == STMF_BUSY)
855 847 ret = EBUSY;
856 848 else if (ctl_ret != STMF_SUCCESS)
857 849 ret = EIO;
858 850 mutex_enter(&stmf_state.stmf_lock);
859 851 stmf_state.stmf_inventory_locked = 0;
860 852 mutex_exit(&stmf_state.stmf_lock);
861 853 break;
862 854
863 855 case STMF_IOCTL_SET_STMF_PROPS:
864 856 if ((ibuf == NULL) ||
865 857 (iocd->stmf_ibuf_size < sizeof (stmf_set_props_t))) {
866 858 ret = EINVAL;
867 859 break;
868 860 }
869 861 stmf_set_props = (stmf_set_props_t *)ibuf;
870 862 mutex_enter(&stmf_state.stmf_lock);
871 863 if ((stmf_set_props->default_lu_state_value ==
872 864 STMF_STATE_OFFLINE) ||
873 865 (stmf_set_props->default_lu_state_value ==
874 866 STMF_STATE_ONLINE)) {
875 867 stmf_state.stmf_default_lu_state =
876 868 stmf_set_props->default_lu_state_value;
877 869 }
878 870 if ((stmf_set_props->default_target_state_value ==
879 871 STMF_STATE_OFFLINE) ||
880 872 (stmf_set_props->default_target_state_value ==
881 873 STMF_STATE_ONLINE)) {
882 874 stmf_state.stmf_default_lport_state =
883 875 stmf_set_props->default_target_state_value;
884 876 }
885 877
886 878 mutex_exit(&stmf_state.stmf_lock);
887 879 break;
888 880
889 881 case STMF_IOCTL_SET_TARGET_PORT_STATE:
890 882 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
891 883 ssi.st_additional_info = NULL;
892 884 std = (stmf_state_desc_t *)ibuf;
893 885 if ((ibuf == NULL) ||
894 886 (iocd->stmf_ibuf_size < sizeof (stmf_state_desc_t))) {
895 887 ret = EINVAL;
896 888 break;
897 889 }
898 890 p_id = std->ident;
899 891 mutex_enter(&stmf_state.stmf_lock);
900 892 if (stmf_state.stmf_inventory_locked) {
901 893 mutex_exit(&stmf_state.stmf_lock);
902 894 ret = EBUSY;
903 895 break;
904 896 }
905 897 for (ilport = stmf_state.stmf_ilportlist; ilport;
906 898 ilport = ilport->ilport_next) {
907 899 uint8_t *id;
908 900 id = (uint8_t *)ilport->ilport_lport->lport_id;
909 901 if ((id[3] == p_id[3]) &&
910 902 (bcmp(id+4, p_id+4, id[3]) == 0)) {
911 903 break;
912 904 }
913 905 }
914 906 if (ilport == NULL) {
915 907 mutex_exit(&stmf_state.stmf_lock);
916 908 ret = ENOENT;
917 909 break;
918 910 }
919 911 stmf_state.stmf_inventory_locked = 1;
920 912 mutex_exit(&stmf_state.stmf_lock);
921 913 cmd = (std->state == STMF_STATE_ONLINE) ?
922 914 STMF_CMD_LPORT_ONLINE : STMF_CMD_LPORT_OFFLINE;
923 915 ctl_ret = stmf_ctl(cmd, (void *)ilport->ilport_lport, &ssi);
924 916 if (ctl_ret == STMF_ALREADY)
925 917 ret = 0;
926 918 else if (ctl_ret == STMF_BUSY)
927 919 ret = EBUSY;
928 920 else if (ctl_ret != STMF_SUCCESS)
929 921 ret = EIO;
930 922 mutex_enter(&stmf_state.stmf_lock);
931 923 stmf_state.stmf_inventory_locked = 0;
932 924 mutex_exit(&stmf_state.stmf_lock);
933 925 break;
934 926
935 927 case STMF_IOCTL_ADD_HG_ENTRY:
936 928 idtype = STMF_ID_TYPE_HOST;
937 929 /* FALLTHROUGH */
938 930 case STMF_IOCTL_ADD_TG_ENTRY:
939 931 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
940 932 ret = EACCES;
941 933 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
942 934 break;
943 935 }
944 936 if (cmd == STMF_IOCTL_ADD_TG_ENTRY) {
945 937 idtype = STMF_ID_TYPE_TARGET;
946 938 }
947 939 grp_entry = (stmf_group_op_data_t *)ibuf;
948 940 if ((ibuf == NULL) ||
949 941 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
950 942 ret = EINVAL;
951 943 break;
952 944 }
953 945 if (grp_entry->group.name[0] == '*') {
954 946 ret = EINVAL;
955 947 break; /* not allowed */
956 948 }
957 949 mutex_enter(&stmf_state.stmf_lock);
958 950 ret = stmf_add_group_member(grp_entry->group.name,
959 951 grp_entry->group.name_size,
960 952 grp_entry->ident + 4,
961 953 grp_entry->ident[3],
962 954 idtype,
963 955 &iocd->stmf_error);
964 956 mutex_exit(&stmf_state.stmf_lock);
965 957 break;
966 958 case STMF_IOCTL_REMOVE_HG_ENTRY:
967 959 idtype = STMF_ID_TYPE_HOST;
968 960 /* FALLTHROUGH */
969 961 case STMF_IOCTL_REMOVE_TG_ENTRY:
970 962 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
971 963 ret = EACCES;
972 964 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
973 965 break;
974 966 }
975 967 if (cmd == STMF_IOCTL_REMOVE_TG_ENTRY) {
976 968 idtype = STMF_ID_TYPE_TARGET;
977 969 }
978 970 grp_entry = (stmf_group_op_data_t *)ibuf;
979 971 if ((ibuf == NULL) ||
980 972 (iocd->stmf_ibuf_size < sizeof (stmf_group_op_data_t))) {
981 973 ret = EINVAL;
982 974 break;
983 975 }
984 976 if (grp_entry->group.name[0] == '*') {
985 977 ret = EINVAL;
986 978 break; /* not allowed */
987 979 }
988 980 mutex_enter(&stmf_state.stmf_lock);
989 981 ret = stmf_remove_group_member(grp_entry->group.name,
990 982 grp_entry->group.name_size,
991 983 grp_entry->ident + 4,
992 984 grp_entry->ident[3],
993 985 idtype,
994 986 &iocd->stmf_error);
995 987 mutex_exit(&stmf_state.stmf_lock);
996 988 break;
997 989 case STMF_IOCTL_CREATE_HOST_GROUP:
998 990 idtype = STMF_ID_TYPE_HOST_GROUP;
999 991 /* FALLTHROUGH */
1000 992 case STMF_IOCTL_CREATE_TARGET_GROUP:
1001 993 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1002 994 ret = EACCES;
1003 995 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1004 996 break;
1005 997 }
1006 998 grpname = (stmf_group_name_t *)ibuf;
1007 999
1008 1000 if (cmd == STMF_IOCTL_CREATE_TARGET_GROUP)
1009 1001 idtype = STMF_ID_TYPE_TARGET_GROUP;
1010 1002 if ((ibuf == NULL) ||
1011 1003 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1012 1004 ret = EINVAL;
1013 1005 break;
1014 1006 }
1015 1007 if (grpname->name[0] == '*') {
1016 1008 ret = EINVAL;
1017 1009 break; /* not allowed */
1018 1010 }
1019 1011 mutex_enter(&stmf_state.stmf_lock);
1020 1012 ret = stmf_add_group(grpname->name,
1021 1013 grpname->name_size, idtype, &iocd->stmf_error);
1022 1014 mutex_exit(&stmf_state.stmf_lock);
1023 1015 break;
1024 1016 case STMF_IOCTL_REMOVE_HOST_GROUP:
1025 1017 idtype = STMF_ID_TYPE_HOST_GROUP;
1026 1018 /* FALLTHROUGH */
1027 1019 case STMF_IOCTL_REMOVE_TARGET_GROUP:
1028 1020 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1029 1021 ret = EACCES;
1030 1022 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1031 1023 break;
1032 1024 }
1033 1025 grpname = (stmf_group_name_t *)ibuf;
1034 1026 if (cmd == STMF_IOCTL_REMOVE_TARGET_GROUP)
1035 1027 idtype = STMF_ID_TYPE_TARGET_GROUP;
1036 1028 if ((ibuf == NULL) ||
1037 1029 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1038 1030 ret = EINVAL;
1039 1031 break;
1040 1032 }
1041 1033 if (grpname->name[0] == '*') {
1042 1034 ret = EINVAL;
1043 1035 break; /* not allowed */
1044 1036 }
1045 1037 mutex_enter(&stmf_state.stmf_lock);
1046 1038 ret = stmf_remove_group(grpname->name,
1047 1039 grpname->name_size, idtype, &iocd->stmf_error);
1048 1040 mutex_exit(&stmf_state.stmf_lock);
1049 1041 break;
1050 1042 case STMF_IOCTL_VALIDATE_VIEW:
1051 1043 case STMF_IOCTL_ADD_VIEW_ENTRY:
1052 1044 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1053 1045 ret = EACCES;
1054 1046 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1055 1047 break;
1056 1048 }
1057 1049 ve = (stmf_view_op_entry_t *)ibuf;
1058 1050 if ((ibuf == NULL) ||
1059 1051 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1060 1052 ret = EINVAL;
1061 1053 break;
1062 1054 }
1063 1055 if (!ve->ve_lu_number_valid)
1064 1056 ve->ve_lu_nbr[2] = 0xFF;
1065 1057 if (ve->ve_all_hosts) {
1066 1058 ve->ve_host_group.name[0] = '*';
1067 1059 ve->ve_host_group.name_size = 1;
1068 1060 }
1069 1061 if (ve->ve_all_targets) {
1070 1062 ve->ve_target_group.name[0] = '*';
1071 1063 ve->ve_target_group.name_size = 1;
1072 1064 }
1073 1065 if (ve->ve_ndx_valid)
1074 1066 veid = ve->ve_ndx;
1075 1067 else
1076 1068 veid = 0xffffffff;
1077 1069 mutex_enter(&stmf_state.stmf_lock);
1078 1070 if (cmd == STMF_IOCTL_ADD_VIEW_ENTRY) {
1079 1071 ret = stmf_add_ve(ve->ve_host_group.name,
1080 1072 ve->ve_host_group.name_size,
1081 1073 ve->ve_target_group.name,
1082 1074 ve->ve_target_group.name_size,
1083 1075 ve->ve_guid,
1084 1076 &veid,
1085 1077 ve->ve_lu_nbr,
1086 1078 &iocd->stmf_error);
1087 1079 } else { /* STMF_IOCTL_VALIDATE_VIEW */
1088 1080 ret = stmf_validate_lun_ve(ve->ve_host_group.name,
1089 1081 ve->ve_host_group.name_size,
1090 1082 ve->ve_target_group.name,
1091 1083 ve->ve_target_group.name_size,
1092 1084 ve->ve_lu_nbr,
1093 1085 &iocd->stmf_error);
1094 1086 }
1095 1087 mutex_exit(&stmf_state.stmf_lock);
1096 1088 if (ret == 0 &&
1097 1089 (!ve->ve_ndx_valid || !ve->ve_lu_number_valid) &&
1098 1090 iocd->stmf_obuf_size >= sizeof (stmf_view_op_entry_t)) {
1099 1091 stmf_view_op_entry_t *ve_ret =
1100 1092 (stmf_view_op_entry_t *)obuf;
1101 1093 iocd->stmf_obuf_nentries = 1;
1102 1094 iocd->stmf_obuf_max_nentries = 1;
1103 1095 if (!ve->ve_ndx_valid) {
1104 1096 ve_ret->ve_ndx = veid;
1105 1097 ve_ret->ve_ndx_valid = 1;
1106 1098 }
1107 1099 if (!ve->ve_lu_number_valid) {
1108 1100 ve_ret->ve_lu_number_valid = 1;
1109 1101 bcopy(ve->ve_lu_nbr, ve_ret->ve_lu_nbr, 8);
1110 1102 }
1111 1103 }
1112 1104 break;
1113 1105 case STMF_IOCTL_REMOVE_VIEW_ENTRY:
1114 1106 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1115 1107 ret = EACCES;
1116 1108 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1117 1109 break;
1118 1110 }
1119 1111 ve = (stmf_view_op_entry_t *)ibuf;
1120 1112 if ((ibuf == NULL) ||
1121 1113 (iocd->stmf_ibuf_size < sizeof (stmf_view_op_entry_t))) {
1122 1114 ret = EINVAL;
1123 1115 break;
1124 1116 }
1125 1117 if (!ve->ve_ndx_valid) {
1126 1118 ret = EINVAL;
1127 1119 break;
1128 1120 }
1129 1121 mutex_enter(&stmf_state.stmf_lock);
1130 1122 ret = stmf_remove_ve_by_id(ve->ve_guid, ve->ve_ndx,
1131 1123 &iocd->stmf_error);
1132 1124 mutex_exit(&stmf_state.stmf_lock);
1133 1125 break;
1134 1126 case STMF_IOCTL_GET_HG_LIST:
1135 1127 id_list = &stmf_state.stmf_hg_list;
1136 1128 /* FALLTHROUGH */
1137 1129 case STMF_IOCTL_GET_TG_LIST:
1138 1130 if (cmd == STMF_IOCTL_GET_TG_LIST)
1139 1131 id_list = &stmf_state.stmf_tg_list;
1140 1132 mutex_enter(&stmf_state.stmf_lock);
1141 1133 iocd->stmf_obuf_max_nentries = id_list->id_count;
1142 1134 n = min(id_list->id_count,
1143 1135 (iocd->stmf_obuf_size)/sizeof (stmf_group_name_t));
1144 1136 iocd->stmf_obuf_nentries = n;
1145 1137 id_entry = id_list->idl_head;
1146 1138 grpname = (stmf_group_name_t *)obuf;
1147 1139 for (i = 0; i < n; i++) {
1148 1140 if (id_entry->id_data[0] == '*') {
1149 1141 if (iocd->stmf_obuf_nentries > 0) {
1150 1142 iocd->stmf_obuf_nentries--;
1151 1143 }
1152 1144 id_entry = id_entry->id_next;
1153 1145 continue;
1154 1146 }
1155 1147 grpname->name_size = id_entry->id_data_size;
1156 1148 bcopy(id_entry->id_data, grpname->name,
1157 1149 id_entry->id_data_size);
1158 1150 grpname++;
1159 1151 id_entry = id_entry->id_next;
1160 1152 }
1161 1153 mutex_exit(&stmf_state.stmf_lock);
1162 1154 break;
1163 1155 case STMF_IOCTL_GET_HG_ENTRIES:
1164 1156 id_list = &stmf_state.stmf_hg_list;
1165 1157 /* FALLTHROUGH */
1166 1158 case STMF_IOCTL_GET_TG_ENTRIES:
1167 1159 grpname = (stmf_group_name_t *)ibuf;
1168 1160 if ((ibuf == NULL) ||
1169 1161 (iocd->stmf_ibuf_size < sizeof (stmf_group_name_t))) {
1170 1162 ret = EINVAL;
1171 1163 break;
1172 1164 }
1173 1165 if (cmd == STMF_IOCTL_GET_TG_ENTRIES) {
1174 1166 id_list = &stmf_state.stmf_tg_list;
1175 1167 }
1176 1168 mutex_enter(&stmf_state.stmf_lock);
1177 1169 id_entry = stmf_lookup_id(id_list, grpname->name_size,
1178 1170 grpname->name);
1179 1171 if (!id_entry)
1180 1172 ret = ENODEV;
1181 1173 else {
1182 1174 stmf_ge_ident_t *grp_entry;
1183 1175 id_list = (stmf_id_list_t *)id_entry->id_impl_specific;
1184 1176 iocd->stmf_obuf_max_nentries = id_list->id_count;
1185 1177 n = min(id_list->id_count,
1186 1178 iocd->stmf_obuf_size/sizeof (stmf_ge_ident_t));
1187 1179 iocd->stmf_obuf_nentries = n;
1188 1180 id_entry = id_list->idl_head;
1189 1181 grp_entry = (stmf_ge_ident_t *)obuf;
1190 1182 for (i = 0; i < n; i++) {
1191 1183 bcopy(id_entry->id_data, grp_entry->ident,
1192 1184 id_entry->id_data_size);
1193 1185 grp_entry->ident_size = id_entry->id_data_size;
1194 1186 id_entry = id_entry->id_next;
1195 1187 grp_entry++;
1196 1188 }
1197 1189 }
1198 1190 mutex_exit(&stmf_state.stmf_lock);
1199 1191 break;
1200 1192
1201 1193 case STMF_IOCTL_GET_VE_LIST:
1202 1194 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1203 1195 mutex_enter(&stmf_state.stmf_lock);
1204 1196 ve = (stmf_view_op_entry_t *)obuf;
1205 1197 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1206 1198 id_entry; id_entry = id_entry->id_next) {
1207 1199 for (view_entry = (stmf_view_entry_t *)
1208 1200 id_entry->id_impl_specific; view_entry;
1209 1201 view_entry = view_entry->ve_next) {
1210 1202 iocd->stmf_obuf_max_nentries++;
1211 1203 if (iocd->stmf_obuf_nentries >= n)
1212 1204 continue;
1213 1205 ve->ve_ndx_valid = 1;
1214 1206 ve->ve_ndx = view_entry->ve_id;
1215 1207 ve->ve_lu_number_valid = 1;
1216 1208 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1217 1209 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1218 1210 view_entry->ve_luid->id_data_size);
1219 1211 if (view_entry->ve_hg->id_data[0] == '*') {
1220 1212 ve->ve_all_hosts = 1;
1221 1213 } else {
1222 1214 bcopy(view_entry->ve_hg->id_data,
1223 1215 ve->ve_host_group.name,
1224 1216 view_entry->ve_hg->id_data_size);
1225 1217 ve->ve_host_group.name_size =
1226 1218 view_entry->ve_hg->id_data_size;
1227 1219 }
1228 1220
1229 1221 if (view_entry->ve_tg->id_data[0] == '*') {
1230 1222 ve->ve_all_targets = 1;
1231 1223 } else {
1232 1224 bcopy(view_entry->ve_tg->id_data,
1233 1225 ve->ve_target_group.name,
1234 1226 view_entry->ve_tg->id_data_size);
1235 1227 ve->ve_target_group.name_size =
1236 1228 view_entry->ve_tg->id_data_size;
1237 1229 }
1238 1230 ve++;
1239 1231 iocd->stmf_obuf_nentries++;
1240 1232 }
1241 1233 }
1242 1234 mutex_exit(&stmf_state.stmf_lock);
1243 1235 break;
1244 1236
1245 1237 case STMF_IOCTL_LU_VE_LIST:
1246 1238 p_id = (uint8_t *)ibuf;
1247 1239 if ((iocd->stmf_ibuf_size != 16) ||
1248 1240 (iocd->stmf_obuf_size < sizeof (stmf_view_op_entry_t))) {
1249 1241 ret = EINVAL;
1250 1242 break;
1251 1243 }
1252 1244
1253 1245 n = iocd->stmf_obuf_size/sizeof (stmf_view_op_entry_t);
1254 1246 mutex_enter(&stmf_state.stmf_lock);
1255 1247 ve = (stmf_view_op_entry_t *)obuf;
1256 1248 for (id_entry = stmf_state.stmf_luid_list.idl_head;
1257 1249 id_entry; id_entry = id_entry->id_next) {
1258 1250 if (bcmp(id_entry->id_data, p_id, 16) != 0)
1259 1251 continue;
1260 1252 for (view_entry = (stmf_view_entry_t *)
1261 1253 id_entry->id_impl_specific; view_entry;
1262 1254 view_entry = view_entry->ve_next) {
1263 1255 iocd->stmf_obuf_max_nentries++;
1264 1256 if (iocd->stmf_obuf_nentries >= n)
1265 1257 continue;
1266 1258 ve->ve_ndx_valid = 1;
1267 1259 ve->ve_ndx = view_entry->ve_id;
1268 1260 ve->ve_lu_number_valid = 1;
1269 1261 bcopy(view_entry->ve_lun, ve->ve_lu_nbr, 8);
1270 1262 bcopy(view_entry->ve_luid->id_data, ve->ve_guid,
1271 1263 view_entry->ve_luid->id_data_size);
1272 1264 if (view_entry->ve_hg->id_data[0] == '*') {
1273 1265 ve->ve_all_hosts = 1;
1274 1266 } else {
1275 1267 bcopy(view_entry->ve_hg->id_data,
1276 1268 ve->ve_host_group.name,
1277 1269 view_entry->ve_hg->id_data_size);
1278 1270 ve->ve_host_group.name_size =
1279 1271 view_entry->ve_hg->id_data_size;
1280 1272 }
1281 1273
1282 1274 if (view_entry->ve_tg->id_data[0] == '*') {
1283 1275 ve->ve_all_targets = 1;
1284 1276 } else {
1285 1277 bcopy(view_entry->ve_tg->id_data,
1286 1278 ve->ve_target_group.name,
1287 1279 view_entry->ve_tg->id_data_size);
1288 1280 ve->ve_target_group.name_size =
1289 1281 view_entry->ve_tg->id_data_size;
1290 1282 }
1291 1283 ve++;
1292 1284 iocd->stmf_obuf_nentries++;
1293 1285 }
1294 1286 break;
1295 1287 }
1296 1288 mutex_exit(&stmf_state.stmf_lock);
1297 1289 break;
1298 1290
1299 1291 case STMF_IOCTL_LOAD_PP_DATA:
1300 1292 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1301 1293 ret = EACCES;
1302 1294 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1303 1295 break;
1304 1296 }
1305 1297 ppi = (stmf_ppioctl_data_t *)ibuf;
1306 1298 if ((ppi == NULL) ||
1307 1299 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1308 1300 ret = EINVAL;
1309 1301 break;
1310 1302 }
1311 1303 /* returned token */
1312 1304 ppi_token = (uint64_t *)obuf;
1313 1305 if ((ppi_token == NULL) ||
1314 1306 (iocd->stmf_obuf_size < sizeof (uint64_t))) {
1315 1307 ret = EINVAL;
1316 1308 break;
1317 1309 }
1318 1310 ret = stmf_load_ppd_ioctl(ppi, ppi_token, &iocd->stmf_error);
1319 1311 break;
1320 1312
1321 1313 case STMF_IOCTL_GET_PP_DATA:
1322 1314 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1323 1315 ret = EACCES;
1324 1316 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1325 1317 break;
1326 1318 }
1327 1319 ppi = (stmf_ppioctl_data_t *)ibuf;
1328 1320 if (ppi == NULL ||
1329 1321 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1330 1322 ret = EINVAL;
1331 1323 break;
1332 1324 }
1333 1325 ppi_out = (stmf_ppioctl_data_t *)obuf;
1334 1326 if ((ppi_out == NULL) ||
1335 1327 (iocd->stmf_obuf_size < sizeof (stmf_ppioctl_data_t))) {
1336 1328 ret = EINVAL;
1337 1329 break;
1338 1330 }
1339 1331 ret = stmf_get_ppd_ioctl(ppi, ppi_out, &iocd->stmf_error);
1340 1332 break;
1341 1333
1342 1334 case STMF_IOCTL_CLEAR_PP_DATA:
1343 1335 if (stmf_state.stmf_config_state == STMF_CONFIG_NONE) {
1344 1336 ret = EACCES;
1345 1337 iocd->stmf_error = STMF_IOCERR_UPDATE_NEED_CFG_INIT;
1346 1338 break;
1347 1339 }
1348 1340 ppi = (stmf_ppioctl_data_t *)ibuf;
1349 1341 if ((ppi == NULL) ||
1350 1342 (iocd->stmf_ibuf_size < sizeof (stmf_ppioctl_data_t))) {
1351 1343 ret = EINVAL;
1352 1344 break;
1353 1345 }
1354 1346 ret = stmf_delete_ppd_ioctl(ppi);
1355 1347 break;
1356 1348
1357 1349 case STMF_IOCTL_CLEAR_TRACE:
1358 1350 stmf_trace_clear();
1359 1351 break;
1360 1352
1361 1353 case STMF_IOCTL_ADD_TRACE:
1362 1354 if (iocd->stmf_ibuf_size && ibuf) {
1363 1355 ((uint8_t *)ibuf)[iocd->stmf_ibuf_size - 1] = 0;
1364 1356 stmf_trace("\nstradm", "%s\n", ibuf);
1365 1357 }
1366 1358 break;
1367 1359
1368 1360 case STMF_IOCTL_GET_TRACE_POSITION:
1369 1361 if (obuf && (iocd->stmf_obuf_size > 3)) {
1370 1362 mutex_enter(&trace_buf_lock);
1371 1363 *((int *)obuf) = trace_buf_curndx;
1372 1364 mutex_exit(&trace_buf_lock);
1373 1365 } else {
1374 1366 ret = EINVAL;
1375 1367 }
1376 1368 break;
1377 1369
1378 1370 case STMF_IOCTL_GET_TRACE:
1379 1371 if ((iocd->stmf_obuf_size == 0) || (iocd->stmf_ibuf_size < 4)) {
1380 1372 ret = EINVAL;
1381 1373 break;
1382 1374 }
1383 1375 i = *((int *)ibuf);
1384 1376 if ((i > trace_buf_size) || ((i + iocd->stmf_obuf_size) >
1385 1377 trace_buf_size)) {
1386 1378 ret = EINVAL;
1387 1379 break;
1388 1380 }
1389 1381 mutex_enter(&trace_buf_lock);
1390 1382 bcopy(stmf_trace_buf + i, obuf, iocd->stmf_obuf_size);
1391 1383 mutex_exit(&trace_buf_lock);
1392 1384 break;
1393 1385
1394 1386 default:
1395 1387 ret = ENOTTY;
1396 1388 }
1397 1389
1398 1390 if (ret == 0) {
1399 1391 ret = stmf_copyout_iocdata(data, mode, iocd, obuf);
1400 1392 } else if (iocd->stmf_error) {
1401 1393 (void) stmf_copyout_iocdata(data, mode, iocd, obuf);
1402 1394 }
1403 1395 if (obuf) {
1404 1396 kmem_free(obuf, iocd->stmf_obuf_size);
1405 1397 obuf = NULL;
1406 1398 }
1407 1399 if (ibuf) {
1408 1400 kmem_free(ibuf, iocd->stmf_ibuf_size);
1409 1401 ibuf = NULL;
1410 1402 }
1411 1403 kmem_free(iocd, sizeof (stmf_iocdata_t));
1412 1404 return (ret);
1413 1405 }
1414 1406
1415 1407 static int
1416 1408 stmf_get_service_state()
1417 1409 {
1418 1410 stmf_i_local_port_t *ilport;
1419 1411 stmf_i_lu_t *ilu;
1420 1412 int online = 0;
1421 1413 int offline = 0;
1422 1414 int onlining = 0;
1423 1415 int offlining = 0;
1424 1416
1425 1417 ASSERT(mutex_owned(&stmf_state.stmf_lock));
1426 1418 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1427 1419 ilport = ilport->ilport_next) {
1428 1420 if (ilport->ilport_state == STMF_STATE_OFFLINE)
1429 1421 offline++;
1430 1422 else if (ilport->ilport_state == STMF_STATE_ONLINE)
1431 1423 online++;
1432 1424 else if (ilport->ilport_state == STMF_STATE_ONLINING)
1433 1425 onlining++;
1434 1426 else if (ilport->ilport_state == STMF_STATE_OFFLINING)
1435 1427 offlining++;
1436 1428 }
1437 1429
1438 1430 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1439 1431 ilu = ilu->ilu_next) {
1440 1432 if (ilu->ilu_state == STMF_STATE_OFFLINE)
1441 1433 offline++;
1442 1434 else if (ilu->ilu_state == STMF_STATE_ONLINE)
1443 1435 online++;
1444 1436 else if (ilu->ilu_state == STMF_STATE_ONLINING)
1445 1437 onlining++;
1446 1438 else if (ilu->ilu_state == STMF_STATE_OFFLINING)
1447 1439 offlining++;
1448 1440 }
1449 1441
1450 1442 if (stmf_state.stmf_service_running) {
1451 1443 if (onlining)
1452 1444 return (STMF_STATE_ONLINING);
1453 1445 else
1454 1446 return (STMF_STATE_ONLINE);
1455 1447 }
1456 1448
1457 1449 if (offlining) {
1458 1450 return (STMF_STATE_OFFLINING);
1459 1451 }
1460 1452
1461 1453 return (STMF_STATE_OFFLINE);
1462 1454 }
1463 1455
/*
 * Transition the overall STMF service to the requested state
 * (STMF_STATE_ONLINE or STMF_STATE_OFFLINE), optionally (re)initializing
 * the persistent configuration when std->config_state requests it.
 * Requires the caller to hold the exclusive open of the control node.
 * Returns 0 on success or a Unix errno (EACCES/EBUSY/EINVAL).
 */
1464 1456 static int
1465 1457 stmf_set_stmf_state(stmf_state_desc_t *std)
1466 1458 {
1467 1459 stmf_i_local_port_t *ilport;
1468 1460 stmf_i_lu_t *ilu;
1469 1461 stmf_state_change_info_t ssi;
1470 1462 int svc_state;
1471 1463
/* All stmf_ctl() calls below are attributed to a user request. */
1472 1464 ssi.st_rflags = STMF_RFLAG_USER_REQUEST;
1473 1465 ssi.st_additional_info = NULL;
1474 1466
1475 1467 mutex_enter(&stmf_state.stmf_lock);
/* Only the exclusive opener of the control device may change state. */
1476 1468 if (!stmf_state.stmf_exclusive_open) {
1477 1469 mutex_exit(&stmf_state.stmf_lock);
1478 1470 return (EACCES);
1479 1471 }
1480 1472
/* Another thread is walking/altering the LU or port inventory. */
1481 1473 if (stmf_state.stmf_inventory_locked) {
1482 1474 mutex_exit(&stmf_state.stmf_lock);
1483 1475 return (EBUSY);
1484 1476 }
1485 1477
1486 1478 if ((std->state != STMF_STATE_ONLINE) &&
1487 1479 (std->state != STMF_STATE_OFFLINE)) {
1488 1480 mutex_exit(&stmf_state.stmf_lock);
1489 1481 return (EINVAL);
1490 1482 }
1491 1483
/* Refuse to start a transition while one is already in flight. */
1492 1484 svc_state = stmf_get_service_state();
1493 1485 if ((svc_state == STMF_STATE_OFFLINING) ||
1494 1486 (svc_state == STMF_STATE_ONLINING)) {
1495 1487 mutex_exit(&stmf_state.stmf_lock);
1496 1488 return (EBUSY);
1497 1489 }
1498 1490
1499 1491 if (svc_state == STMF_STATE_OFFLINE) {
/*
 * STMF_CONFIG_INIT wipes the persistent provider data and view
 * configuration; it is only legal while staying offline.
 */
1500 1492 if (std->config_state == STMF_CONFIG_INIT) {
1501 1493 if (std->state != STMF_STATE_OFFLINE) {
1502 1494 mutex_exit(&stmf_state.stmf_lock);
1503 1495 return (EINVAL);
1504 1496 }
1505 1497 stmf_state.stmf_config_state = STMF_CONFIG_INIT;
1506 1498 stmf_delete_all_ppds();
1507 1499 stmf_view_clear_config();
1508 1500 stmf_view_init();
1509 1501 mutex_exit(&stmf_state.stmf_lock);
1510 1502 return (0);
1511 1503 }
/* Any other request from INIT/NONE must complete initialization first. */
1512 1504 if ((stmf_state.stmf_config_state == STMF_CONFIG_INIT) ||
1513 1505 (stmf_state.stmf_config_state == STMF_CONFIG_NONE)) {
1514 1506 if (std->config_state != STMF_CONFIG_INIT_DONE) {
1515 1507 mutex_exit(&stmf_state.stmf_lock);
1516 1508 return (EINVAL);
1517 1509 }
1518 1510 stmf_state.stmf_config_state = STMF_CONFIG_INIT_DONE;
1519 1511 }
/* Already offline and asked for offline: nothing more to do. */
1520 1512 if (std->state == STMF_STATE_OFFLINE) {
1521 1513 mutex_exit(&stmf_state.stmf_lock);
1522 1514 return (0);
1523 1515 }
1524 1516 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT) {
1525 1517 mutex_exit(&stmf_state.stmf_lock);
1526 1518 return (EINVAL);
1527 1519 }
/*
 * Going online: pin the inventory, then drop the lock before calling
 * stmf_ctl() on each port/LU (stmf_ctl may block).  Default-state
 * checks gate which objects are brought online.
 */
1528 1520 stmf_state.stmf_inventory_locked = 1;
1529 1521 stmf_state.stmf_service_running = 1;
1530 1522 mutex_exit(&stmf_state.stmf_lock);
1531 1523
1532 1524 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1533 1525 ilport = ilport->ilport_next) {
1534 1526 if (stmf_state.stmf_default_lport_state !=
1535 1527 STMF_STATE_ONLINE)
1536 1528 continue;
1537 1529 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE,
1538 1530 ilport->ilport_lport, &ssi);
1539 1531 }
1540 1532
1541 1533 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1542 1534 ilu = ilu->ilu_next) {
1543 1535 if (stmf_state.stmf_default_lu_state !=
1544 1536 STMF_STATE_ONLINE)
1545 1537 continue;
1546 1538 (void) stmf_ctl(STMF_CMD_LU_ONLINE, ilu->ilu_lu, &ssi);
1547 1539 }
1548 1540 mutex_enter(&stmf_state.stmf_lock);
1549 1541 stmf_state.stmf_inventory_locked = 0;
1550 1542 mutex_exit(&stmf_state.stmf_lock);
1551 1543 return (0);
1552 1544 }
1553 1545
1554 1546 /* svc_state is STMF_STATE_ONLINE here */
1555 1547 if ((std->state != STMF_STATE_OFFLINE) ||
1556 1548 (std->config_state == STMF_CONFIG_INIT)) {
1557 1549 mutex_exit(&stmf_state.stmf_lock);
1558 1550 return (EACCES);
1559 1551 }
1560 1552
/* Going offline: same pin-inventory / unlocked stmf_ctl() pattern. */
1561 1553 stmf_state.stmf_inventory_locked = 1;
1562 1554 stmf_state.stmf_service_running = 0;
1563 1555
1564 1556 mutex_exit(&stmf_state.stmf_lock);
1565 1557 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1566 1558 ilport = ilport->ilport_next) {
1567 1559 if (ilport->ilport_state != STMF_STATE_ONLINE)
1568 1560 continue;
1569 1561 (void) stmf_ctl(STMF_CMD_LPORT_OFFLINE,
1570 1562 ilport->ilport_lport, &ssi);
1571 1563 }
1572 1564
1573 1565 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
1574 1566 ilu = ilu->ilu_next) {
1575 1567 if (ilu->ilu_state != STMF_STATE_ONLINE)
1576 1568 continue;
1577 1569 (void) stmf_ctl(STMF_CMD_LU_OFFLINE, ilu->ilu_lu, &ssi);
1578 1570 }
1579 1571 mutex_enter(&stmf_state.stmf_lock);
1580 1572 stmf_state.stmf_inventory_locked = 0;
1581 1573 mutex_exit(&stmf_state.stmf_lock);
1582 1574 return (0);
1583 1575 }
1584 1576
|
↓ open down ↓ |
1197 lines elided |
↑ open up ↑ |
1585 1577 static int
1586 1578 stmf_get_stmf_state(stmf_state_desc_t *std)
1587 1579 {
1588 1580 mutex_enter(&stmf_state.stmf_lock);
1589 1581 std->state = stmf_get_service_state();
1590 1582 std->config_state = stmf_state.stmf_config_state;
1591 1583 mutex_exit(&stmf_state.stmf_lock);
1592 1584
1593 1585 return (0);
1594 1586 }
1587 +
1595 1588 /*
1596 1589 * handles registration message from pppt for a logical unit
1597 1590 */
1598 1591 stmf_status_t
1599 1592 stmf_ic_lu_reg(stmf_ic_reg_dereg_lun_msg_t *msg, uint32_t type)
1600 1593 {
1601 1594 stmf_i_lu_provider_t *ilp;
1602 1595 stmf_lu_provider_t *lp;
1603 1596 mutex_enter(&stmf_state.stmf_lock);
1604 1597 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1605 1598 if (strcmp(msg->icrl_lu_provider_name,
1606 1599 ilp->ilp_lp->lp_name) == 0) {
1607 1600 lp = ilp->ilp_lp;
1608 1601 mutex_exit(&stmf_state.stmf_lock);
1609 1602 lp->lp_proxy_msg(msg->icrl_lun_id, msg->icrl_cb_arg,
1610 1603 msg->icrl_cb_arg_len, type);
1611 1604 return (STMF_SUCCESS);
1612 1605 }
1613 1606 }
1614 1607 mutex_exit(&stmf_state.stmf_lock);
1615 1608 return (STMF_SUCCESS);
1616 1609 }
1617 1610
1618 1611 /*
1619 1612 * handles de-registration message from pppt for a logical unit
1620 1613 */
1621 1614 stmf_status_t
1622 1615 stmf_ic_lu_dereg(stmf_ic_reg_dereg_lun_msg_t *msg)
1623 1616 {
1624 1617 stmf_i_lu_provider_t *ilp;
1625 1618 stmf_lu_provider_t *lp;
1626 1619 mutex_enter(&stmf_state.stmf_lock);
1627 1620 for (ilp = stmf_state.stmf_ilplist; ilp != NULL; ilp = ilp->ilp_next) {
1628 1621 if (strcmp(msg->icrl_lu_provider_name,
1629 1622 ilp->ilp_lp->lp_name) == 0) {
1630 1623 lp = ilp->ilp_lp;
1631 1624 mutex_exit(&stmf_state.stmf_lock);
1632 1625 lp->lp_proxy_msg(msg->icrl_lun_id, NULL, 0,
1633 1626 STMF_MSG_LU_DEREGISTER);
1634 1627 return (STMF_SUCCESS);
1635 1628 }
1636 1629 }
1637 1630 mutex_exit(&stmf_state.stmf_lock);
1638 1631 return (STMF_SUCCESS);
1639 1632 }
1640 1633
1641 1634 /*
1642 1635 * helper function to find a task that matches a task_msgid
1643 1636 */
1644 1637 scsi_task_t *
1645 1638 find_task_from_msgid(uint8_t *lu_id, stmf_ic_msgid_t task_msgid)
1646 1639 {
1647 1640 stmf_i_lu_t *ilu;
1648 1641 stmf_i_scsi_task_t *itask;
1649 1642
1650 1643 mutex_enter(&stmf_state.stmf_lock);
1651 1644 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
1652 1645 if (bcmp(lu_id, ilu->ilu_lu->lu_id->ident, 16) == 0) {
1653 1646 break;
1654 1647 }
1655 1648 }
1656 1649
1657 1650 if (ilu == NULL) {
1658 1651 mutex_exit(&stmf_state.stmf_lock);
1659 1652 return (NULL);
1660 1653 }
1661 1654
1662 1655 mutex_enter(&ilu->ilu_task_lock);
1663 1656 for (itask = ilu->ilu_tasks; itask != NULL;
1664 1657 itask = itask->itask_lu_next) {
1665 1658 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
1666 1659 ITASK_BEING_ABORTED)) {
1667 1660 continue;
1668 1661 }
1669 1662 if (itask->itask_proxy_msg_id == task_msgid) {
1670 1663 break;
1671 1664 }
1672 1665 }
1673 1666 mutex_exit(&ilu->ilu_task_lock);
1674 1667 mutex_exit(&stmf_state.stmf_lock);
1675 1668
1676 1669 if (itask != NULL) {
1677 1670 return (itask->itask_task);
1678 1671 } else {
1679 1672 /* task not found. Likely already aborted. */
1680 1673 return (NULL);
1681 1674 }
1682 1675 }
1683 1676
1684 1677 /*
1685 1678 * message received from pppt/ic
1686 1679 */
1687 1680 stmf_status_t
1688 1681 stmf_msg_rx(stmf_ic_msg_t *msg)
1689 1682 {
1690 1683 mutex_enter(&stmf_state.stmf_lock);
1691 1684 if (stmf_state.stmf_alua_state != 1) {
1692 1685 mutex_exit(&stmf_state.stmf_lock);
1693 1686 cmn_err(CE_WARN, "stmf alua state is disabled");
1694 1687 ic_msg_free(msg);
1695 1688 return (STMF_FAILURE);
1696 1689 }
1697 1690 mutex_exit(&stmf_state.stmf_lock);
1698 1691
1699 1692 switch (msg->icm_msg_type) {
1700 1693 case STMF_ICM_REGISTER_LUN:
1701 1694 (void) stmf_ic_lu_reg(
1702 1695 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1703 1696 STMF_MSG_LU_REGISTER);
1704 1697 break;
1705 1698 case STMF_ICM_LUN_ACTIVE:
1706 1699 (void) stmf_ic_lu_reg(
1707 1700 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg,
1708 1701 STMF_MSG_LU_ACTIVE);
1709 1702 break;
1710 1703 case STMF_ICM_DEREGISTER_LUN:
1711 1704 (void) stmf_ic_lu_dereg(
1712 1705 (stmf_ic_reg_dereg_lun_msg_t *)msg->icm_msg);
1713 1706 break;
1714 1707 case STMF_ICM_SCSI_DATA:
1715 1708 (void) stmf_ic_rx_scsi_data(
1716 1709 (stmf_ic_scsi_data_msg_t *)msg->icm_msg);
1717 1710 break;
1718 1711 case STMF_ICM_SCSI_STATUS:
1719 1712 (void) stmf_ic_rx_scsi_status(
1720 1713 (stmf_ic_scsi_status_msg_t *)msg->icm_msg);
1721 1714 break;
1722 1715 case STMF_ICM_STATUS:
1723 1716 (void) stmf_ic_rx_status(
1724 1717 (stmf_ic_status_msg_t *)msg->icm_msg);
1725 1718 break;
1726 1719 default:
1727 1720 cmn_err(CE_WARN, "unknown message received %d",
1728 1721 msg->icm_msg_type);
1729 1722 ic_msg_free(msg);
1730 1723 return (STMF_FAILURE);
1731 1724 }
1732 1725 ic_msg_free(msg);
1733 1726 return (STMF_SUCCESS);
1734 1727 }
1735 1728
1736 1729 stmf_status_t
1737 1730 stmf_ic_rx_status(stmf_ic_status_msg_t *msg)
1738 1731 {
1739 1732 stmf_i_local_port_t *ilport;
1740 1733
1741 1734 if (msg->ics_msg_type != STMF_ICM_REGISTER_PROXY_PORT) {
1742 1735 /* for now, ignore other message status */
1743 1736 return (STMF_SUCCESS);
1744 1737 }
1745 1738
1746 1739 if (msg->ics_status != STMF_SUCCESS) {
1747 1740 return (STMF_SUCCESS);
1748 1741 }
1749 1742
1750 1743 mutex_enter(&stmf_state.stmf_lock);
1751 1744 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
1752 1745 ilport = ilport->ilport_next) {
1753 1746 if (msg->ics_msgid == ilport->ilport_reg_msgid) {
1754 1747 ilport->ilport_proxy_registered = 1;
1755 1748 break;
1756 1749 }
1757 1750 }
1758 1751 mutex_exit(&stmf_state.stmf_lock);
1759 1752 return (STMF_SUCCESS);
1760 1753 }
1761 1754
1762 1755 /*
1763 1756 * handles scsi status message from pppt
1764 1757 */
1765 1758 stmf_status_t
1766 1759 stmf_ic_rx_scsi_status(stmf_ic_scsi_status_msg_t *msg)
1767 1760 {
1768 1761 scsi_task_t *task;
1769 1762
1770 1763 /* is this a task management command */
1771 1764 if (msg->icss_task_msgid & MSG_ID_TM_BIT) {
1772 1765 return (STMF_SUCCESS);
1773 1766 }
1774 1767
1775 1768 task = find_task_from_msgid(msg->icss_lun_id, msg->icss_task_msgid);
1776 1769
1777 1770 if (task == NULL) {
1778 1771 return (STMF_SUCCESS);
1779 1772 }
1780 1773
1781 1774 task->task_scsi_status = msg->icss_status;
1782 1775 task->task_sense_data = msg->icss_sense;
1783 1776 task->task_sense_length = msg->icss_sense_len;
1784 1777 (void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
1785 1778
1786 1779 return (STMF_SUCCESS);
1787 1780 }
1788 1781
1789 1782 /*
1790 1783 * handles scsi data message from pppt
1791 1784 */
1792 1785 stmf_status_t
1793 1786 stmf_ic_rx_scsi_data(stmf_ic_scsi_data_msg_t *msg)
1794 1787 {
1795 1788 stmf_i_scsi_task_t *itask;
1796 1789 scsi_task_t *task;
1797 1790 stmf_xfer_data_t *xd = NULL;
1798 1791 stmf_data_buf_t *dbuf;
1799 1792 uint32_t sz, minsz, xd_sz, asz;
1800 1793
1801 1794 /* is this a task management command */
1802 1795 if (msg->icsd_task_msgid & MSG_ID_TM_BIT) {
1803 1796 return (STMF_SUCCESS);
1804 1797 }
1805 1798
1806 1799 task = find_task_from_msgid(msg->icsd_lun_id, msg->icsd_task_msgid);
1807 1800 if (task == NULL) {
1808 1801 stmf_ic_msg_t *ic_xfer_done_msg = NULL;
1809 1802 static uint64_t data_msg_id;
1810 1803 stmf_status_t ic_ret = STMF_FAILURE;
1811 1804 mutex_enter(&stmf_state.stmf_lock);
1812 1805 data_msg_id = stmf_proxy_msg_id++;
1813 1806 mutex_exit(&stmf_state.stmf_lock);
1814 1807 /*
1815 1808 * send xfer done status to pppt
1816 1809 * for now, set the session id to 0 as we cannot
1817 1810 * ascertain it since we cannot find the task
1818 1811 */
1819 1812 ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
1820 1813 msg->icsd_task_msgid, 0, STMF_FAILURE, data_msg_id);
1821 1814 if (ic_xfer_done_msg) {
1822 1815 ic_ret = ic_tx_msg(ic_xfer_done_msg);
1823 1816 if (ic_ret != STMF_IC_MSG_SUCCESS) {
1824 1817 cmn_err(CE_WARN, "unable to xmit proxy msg");
1825 1818 }
1826 1819 }
1827 1820 return (STMF_FAILURE);
1828 1821 }
1829 1822
1830 1823 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
1831 1824 dbuf = itask->itask_proxy_dbuf;
1832 1825
1833 1826 task->task_cmd_xfer_length += msg->icsd_data_len;
1834 1827
1835 1828 if (task->task_additional_flags &
1836 1829 TASK_AF_NO_EXPECTED_XFER_LENGTH) {
1837 1830 task->task_expected_xfer_length =
1838 1831 task->task_cmd_xfer_length;
1839 1832 }
1840 1833
1841 1834 sz = min(task->task_expected_xfer_length,
1842 1835 task->task_cmd_xfer_length);
1843 1836
1844 1837 xd_sz = msg->icsd_data_len;
1845 1838 asz = xd_sz + sizeof (*xd) - 4;
1846 1839 xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
1847 1840
1848 1841 if (xd == NULL) {
1849 1842 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1850 1843 STMF_ALLOC_FAILURE, NULL);
1851 1844 return (STMF_FAILURE);
1852 1845 }
1853 1846
1854 1847 xd->alloc_size = asz;
1855 1848 xd->size_left = xd_sz;
1856 1849 bcopy(msg->icsd_data, xd->buf, xd_sz);
1857 1850
1858 1851 sz = min(sz, xd->size_left);
1859 1852 xd->size_left = sz;
1860 1853 minsz = min(512, sz);
1861 1854
1862 1855 if (dbuf == NULL)
1863 1856 dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
1864 1857 if (dbuf == NULL) {
1865 1858 kmem_free(xd, xd->alloc_size);
1866 1859 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
1867 1860 STMF_ALLOC_FAILURE, NULL);
1868 1861 return (STMF_FAILURE);
1869 1862 }
1870 1863 dbuf->db_lu_private = xd;
1871 1864 dbuf->db_relative_offset = task->task_nbytes_transferred;
1872 1865 stmf_xd_to_dbuf(dbuf, 0);
1873 1866
1874 1867 dbuf->db_flags = DB_DIRECTION_TO_RPORT;
1875 1868 (void) stmf_xfer_data(task, dbuf, 0);
1876 1869 return (STMF_SUCCESS);
1877 1870 }
1878 1871
1879 1872 stmf_status_t
1880 1873 stmf_proxy_scsi_cmd(scsi_task_t *task, stmf_data_buf_t *dbuf)
1881 1874 {
1882 1875 stmf_i_scsi_task_t *itask =
1883 1876 (stmf_i_scsi_task_t *)task->task_stmf_private;
1884 1877 stmf_i_local_port_t *ilport =
1885 1878 (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
1886 1879 stmf_ic_msg_t *ic_cmd_msg;
1887 1880 stmf_ic_msg_status_t ic_ret;
1888 1881 stmf_status_t ret = STMF_FAILURE;
1889 1882
1890 1883 if (stmf_state.stmf_alua_state != 1) {
1891 1884 cmn_err(CE_WARN, "stmf alua state is disabled");
1892 1885 return (STMF_FAILURE);
1893 1886 }
1894 1887
1895 1888 if (ilport->ilport_proxy_registered == 0) {
1896 1889 return (STMF_FAILURE);
1897 1890 }
1898 1891
1899 1892 mutex_enter(&stmf_state.stmf_lock);
1900 1893 itask->itask_proxy_msg_id = stmf_proxy_msg_id++;
1901 1894 mutex_exit(&stmf_state.stmf_lock);
1902 1895 itask->itask_proxy_dbuf = dbuf;
1903 1896
1904 1897 /*
1905 1898 * stmf will now take over the task handling for this task
1906 1899 * but it still needs to be treated differently from other
1907 1900 * default handled tasks, hence the ITASK_PROXY_TASK.
1908 1901 * If this is a task management function, we're really just
1909 1902 * duping the command to the peer. Set the TM bit so that
1910 1903 * we can recognize this on return since we won't be completing
1911 1904 * the proxied task in that case.
1912 1905 */
1913 1906 if (task->task_mgmt_function) {
1914 1907 itask->itask_proxy_msg_id |= MSG_ID_TM_BIT;
1915 1908 } else {
1916 1909 uint32_t new, old;
1917 1910 do {
1918 1911 new = old = itask->itask_flags;
1919 1912 if (new & ITASK_BEING_ABORTED)
1920 1913 return (STMF_FAILURE);
1921 1914 new |= ITASK_DEFAULT_HANDLING | ITASK_PROXY_TASK;
1922 1915 } while (atomic_cas_32(&itask->itask_flags, old, new) != old);
1923 1916 }
1924 1917 if (dbuf) {
1925 1918 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1926 1919 task, dbuf->db_data_size, dbuf->db_sglist[0].seg_addr,
1927 1920 itask->itask_proxy_msg_id);
1928 1921 } else {
1929 1922 ic_cmd_msg = ic_scsi_cmd_msg_alloc(itask->itask_proxy_msg_id,
1930 1923 task, 0, NULL, itask->itask_proxy_msg_id);
1931 1924 }
1932 1925 if (ic_cmd_msg) {
1933 1926 ic_ret = ic_tx_msg(ic_cmd_msg);
1934 1927 if (ic_ret == STMF_IC_MSG_SUCCESS) {
1935 1928 ret = STMF_SUCCESS;
1936 1929 }
1937 1930 }
1938 1931 return (ret);
1939 1932 }
1940 1933
1941 1934
1942 1935 stmf_status_t
1943 1936 pppt_modload()
1944 1937 {
1945 1938 int error;
1946 1939
1947 1940 if (pppt_mod == NULL && ((pppt_mod =
1948 1941 ddi_modopen("drv/pppt", KRTLD_MODE_FIRST, &error)) == NULL)) {
1949 1942 cmn_err(CE_WARN, "Unable to load pppt");
1950 1943 return (STMF_FAILURE);
1951 1944 }
1952 1945
1953 1946 if (ic_reg_port_msg_alloc == NULL && ((ic_reg_port_msg_alloc =
1954 1947 (stmf_ic_reg_port_msg_alloc_func_t)
1955 1948 ddi_modsym(pppt_mod, "stmf_ic_reg_port_msg_alloc",
1956 1949 &error)) == NULL)) {
1957 1950 cmn_err(CE_WARN,
1958 1951 "Unable to find symbol - stmf_ic_reg_port_msg_alloc");
1959 1952 return (STMF_FAILURE);
1960 1953 }
1961 1954
1962 1955
1963 1956 if (ic_dereg_port_msg_alloc == NULL && ((ic_dereg_port_msg_alloc =
1964 1957 (stmf_ic_dereg_port_msg_alloc_func_t)
1965 1958 ddi_modsym(pppt_mod, "stmf_ic_dereg_port_msg_alloc",
1966 1959 &error)) == NULL)) {
1967 1960 cmn_err(CE_WARN,
1968 1961 "Unable to find symbol - stmf_ic_dereg_port_msg_alloc");
1969 1962 return (STMF_FAILURE);
1970 1963 }
1971 1964
1972 1965 if (ic_reg_lun_msg_alloc == NULL && ((ic_reg_lun_msg_alloc =
1973 1966 (stmf_ic_reg_lun_msg_alloc_func_t)
1974 1967 ddi_modsym(pppt_mod, "stmf_ic_reg_lun_msg_alloc",
1975 1968 &error)) == NULL)) {
1976 1969 cmn_err(CE_WARN,
1977 1970 "Unable to find symbol - stmf_ic_reg_lun_msg_alloc");
1978 1971 return (STMF_FAILURE);
1979 1972 }
1980 1973
1981 1974 if (ic_lun_active_msg_alloc == NULL && ((ic_lun_active_msg_alloc =
1982 1975 (stmf_ic_lun_active_msg_alloc_func_t)
1983 1976 ddi_modsym(pppt_mod, "stmf_ic_lun_active_msg_alloc",
1984 1977 &error)) == NULL)) {
1985 1978 cmn_err(CE_WARN,
1986 1979 "Unable to find symbol - stmf_ic_lun_active_msg_alloc");
1987 1980 return (STMF_FAILURE);
1988 1981 }
1989 1982
1990 1983 if (ic_dereg_lun_msg_alloc == NULL && ((ic_dereg_lun_msg_alloc =
1991 1984 (stmf_ic_dereg_lun_msg_alloc_func_t)
1992 1985 ddi_modsym(pppt_mod, "stmf_ic_dereg_lun_msg_alloc",
1993 1986 &error)) == NULL)) {
1994 1987 cmn_err(CE_WARN,
1995 1988 "Unable to find symbol - stmf_ic_dereg_lun_msg_alloc");
1996 1989 return (STMF_FAILURE);
1997 1990 }
1998 1991
1999 1992 if (ic_scsi_cmd_msg_alloc == NULL && ((ic_scsi_cmd_msg_alloc =
2000 1993 (stmf_ic_scsi_cmd_msg_alloc_func_t)
2001 1994 ddi_modsym(pppt_mod, "stmf_ic_scsi_cmd_msg_alloc",
2002 1995 &error)) == NULL)) {
2003 1996 cmn_err(CE_WARN,
2004 1997 "Unable to find symbol - stmf_ic_scsi_cmd_msg_alloc");
2005 1998 return (STMF_FAILURE);
2006 1999 }
2007 2000
2008 2001 if (ic_scsi_data_xfer_done_msg_alloc == NULL &&
2009 2002 ((ic_scsi_data_xfer_done_msg_alloc =
2010 2003 (stmf_ic_scsi_data_xfer_done_msg_alloc_func_t)
2011 2004 ddi_modsym(pppt_mod, "stmf_ic_scsi_data_xfer_done_msg_alloc",
2012 2005 &error)) == NULL)) {
2013 2006 cmn_err(CE_WARN,
2014 2007 "Unable to find symbol -"
2015 2008 "stmf_ic_scsi_data_xfer_done_msg_alloc");
2016 2009 return (STMF_FAILURE);
2017 2010 }
2018 2011
2019 2012 if (ic_session_reg_msg_alloc == NULL &&
2020 2013 ((ic_session_reg_msg_alloc =
2021 2014 (stmf_ic_session_create_msg_alloc_func_t)
2022 2015 ddi_modsym(pppt_mod, "stmf_ic_session_create_msg_alloc",
2023 2016 &error)) == NULL)) {
2024 2017 cmn_err(CE_WARN,
2025 2018 "Unable to find symbol -"
2026 2019 "stmf_ic_session_create_msg_alloc");
2027 2020 return (STMF_FAILURE);
2028 2021 }
2029 2022
2030 2023 if (ic_session_dereg_msg_alloc == NULL &&
2031 2024 ((ic_session_dereg_msg_alloc =
2032 2025 (stmf_ic_session_destroy_msg_alloc_func_t)
2033 2026 ddi_modsym(pppt_mod, "stmf_ic_session_destroy_msg_alloc",
2034 2027 &error)) == NULL)) {
2035 2028 cmn_err(CE_WARN,
2036 2029 "Unable to find symbol -"
2037 2030 "stmf_ic_session_destroy_msg_alloc");
2038 2031 return (STMF_FAILURE);
2039 2032 }
2040 2033
2041 2034 if (ic_tx_msg == NULL && ((ic_tx_msg =
2042 2035 (stmf_ic_tx_msg_func_t)ddi_modsym(pppt_mod, "stmf_ic_tx_msg",
2043 2036 &error)) == NULL)) {
2044 2037 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_tx_msg");
2045 2038 return (STMF_FAILURE);
2046 2039 }
2047 2040
2048 2041 if (ic_msg_free == NULL && ((ic_msg_free =
2049 2042 (stmf_ic_msg_free_func_t)ddi_modsym(pppt_mod, "stmf_ic_msg_free",
2050 2043 &error)) == NULL)) {
2051 2044 cmn_err(CE_WARN, "Unable to find symbol - stmf_ic_msg_free");
2052 2045 return (STMF_FAILURE);
2053 2046 }
2054 2047 return (STMF_SUCCESS);
2055 2048 }
2056 2049
2057 2050 static void
2058 2051 stmf_get_alua_state(stmf_alua_state_desc_t *alua_state)
2059 2052 {
2060 2053 mutex_enter(&stmf_state.stmf_lock);
2061 2054 alua_state->alua_node = stmf_state.stmf_alua_node;
2062 2055 alua_state->alua_state = stmf_state.stmf_alua_state;
2063 2056 mutex_exit(&stmf_state.stmf_lock);
2064 2057 }
2065 2058
2066 2059
2067 2060 static int
2068 2061 stmf_set_alua_state(stmf_alua_state_desc_t *alua_state)
2069 2062 {
2070 2063 stmf_i_local_port_t *ilport;
2071 2064 stmf_i_lu_t *ilu;
2072 2065 stmf_lu_t *lu;
2073 2066 stmf_ic_msg_status_t ic_ret;
2074 2067 stmf_ic_msg_t *ic_reg_lun, *ic_reg_port;
2075 2068 stmf_local_port_t *lport;
2076 2069 int ret = 0;
2077 2070
2078 2071 if (alua_state->alua_state > 1 || alua_state->alua_node > 1) {
2079 2072 return (EINVAL);
2080 2073 }
2081 2074
2082 2075 mutex_enter(&stmf_state.stmf_lock);
2083 2076 if (alua_state->alua_state == 1) {
2084 2077 if (pppt_modload() == STMF_FAILURE) {
2085 2078 ret = EIO;
2086 2079 goto err;
2087 2080 }
2088 2081 if (alua_state->alua_node != 0) {
2089 2082 /* reset existing rtpids to new base */
2090 2083 stmf_rtpid_counter = 255;
2091 2084 }
2092 2085 stmf_state.stmf_alua_node = alua_state->alua_node;
2093 2086 stmf_state.stmf_alua_state = 1;
2094 2087 /* register existing local ports with ppp */
2095 2088 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2096 2089 ilport = ilport->ilport_next) {
2097 2090 /* skip standby ports and non-alua participants */
2098 2091 if (ilport->ilport_standby == 1 ||
2099 2092 ilport->ilport_alua == 0) {
2100 2093 continue;
2101 2094 }
2102 2095 if (alua_state->alua_node != 0) {
2103 2096 ilport->ilport_rtpid =
2104 2097 atomic_add_16_nv(&stmf_rtpid_counter, 1);
2105 2098 }
2106 2099 lport = ilport->ilport_lport;
2107 2100 ic_reg_port = ic_reg_port_msg_alloc(
2108 2101 lport->lport_id, ilport->ilport_rtpid,
2109 2102 0, NULL, stmf_proxy_msg_id);
2110 2103 if (ic_reg_port) {
2111 2104 ic_ret = ic_tx_msg(ic_reg_port);
2112 2105 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2113 2106 ilport->ilport_reg_msgid =
2114 2107 stmf_proxy_msg_id++;
2115 2108 } else {
2116 2109 cmn_err(CE_WARN,
2117 2110 "error on port registration "
2118 2111 "port - %s",
2119 2112 ilport->ilport_kstat_tgt_name);
2120 2113 }
2121 2114 }
2122 2115 }
2123 2116 /* register existing logical units */
2124 2117 for (ilu = stmf_state.stmf_ilulist; ilu != NULL;
2125 2118 ilu = ilu->ilu_next) {
2126 2119 if (ilu->ilu_access != STMF_LU_ACTIVE) {
2127 2120 continue;
2128 2121 }
2129 2122 /* register with proxy module */
2130 2123 lu = ilu->ilu_lu;
2131 2124 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2132 2125 lu->lu_lp->lp_alua_support) {
2133 2126 ilu->ilu_alua = 1;
2134 2127 /* allocate the register message */
2135 2128 ic_reg_lun = ic_reg_lun_msg_alloc(
2136 2129 lu->lu_id->ident, lu->lu_lp->lp_name,
2137 2130 lu->lu_proxy_reg_arg_len,
2138 2131 (uint8_t *)lu->lu_proxy_reg_arg,
2139 2132 stmf_proxy_msg_id);
2140 2133 /* send the message */
2141 2134 if (ic_reg_lun) {
2142 2135 ic_ret = ic_tx_msg(ic_reg_lun);
2143 2136 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2144 2137 stmf_proxy_msg_id++;
2145 2138 }
2146 2139 }
2147 2140 }
2148 2141 }
2149 2142 } else {
2150 2143 stmf_state.stmf_alua_state = 0;
2151 2144 }
2152 2145
2153 2146 err:
2154 2147 mutex_exit(&stmf_state.stmf_lock);
2155 2148 return (ret);
2156 2149 }
2157 2150
2158 2151
2159 2152 typedef struct {
2160 2153 void *bp; /* back pointer from internal struct to main struct */
2161 2154 int alloc_size;
2162 2155 } __istmf_t;
2163 2156
2164 2157 typedef struct {
2165 2158 __istmf_t *fp; /* Framework private */
2166 2159 void *cp; /* Caller private */
2167 2160 void *ss; /* struct specific */
2168 2161 } __stmf_t;
2169 2162
2170 2163 static struct {
2171 2164 int shared;
2172 2165 int fw_private;
2173 2166 } stmf_sizes[] = { { 0, 0 },
2174 2167 { GET_STRUCT_SIZE(stmf_lu_provider_t),
2175 2168 GET_STRUCT_SIZE(stmf_i_lu_provider_t) },
2176 2169 { GET_STRUCT_SIZE(stmf_port_provider_t),
2177 2170 GET_STRUCT_SIZE(stmf_i_port_provider_t) },
2178 2171 { GET_STRUCT_SIZE(stmf_local_port_t),
2179 2172 GET_STRUCT_SIZE(stmf_i_local_port_t) },
2180 2173 { GET_STRUCT_SIZE(stmf_lu_t),
2181 2174 GET_STRUCT_SIZE(stmf_i_lu_t) },
2182 2175 { GET_STRUCT_SIZE(stmf_scsi_session_t),
2183 2176 GET_STRUCT_SIZE(stmf_i_scsi_session_t) },
2184 2177 { GET_STRUCT_SIZE(scsi_task_t),
2185 2178 GET_STRUCT_SIZE(stmf_i_scsi_task_t) },
2186 2179 { GET_STRUCT_SIZE(stmf_data_buf_t),
2187 2180 GET_STRUCT_SIZE(__istmf_t) },
2188 2181 { GET_STRUCT_SIZE(stmf_dbuf_store_t),
2189 2182 GET_STRUCT_SIZE(__istmf_t) }
2190 2183
2191 2184 };
2192 2185
2193 2186 void *
2194 2187 stmf_alloc(stmf_struct_id_t struct_id, int additional_size, int flags)
2195 2188 {
2196 2189 int stmf_size;
2197 2190 int kmem_flag;
2198 2191 __stmf_t *sh;
2199 2192
2200 2193 if ((struct_id == 0) || (struct_id >= STMF_MAX_STRUCT_IDS))
2201 2194 return (NULL);
2202 2195
2203 2196 if ((curthread->t_flag & T_INTR_THREAD) || (flags & AF_FORCE_NOSLEEP)) {
2204 2197 kmem_flag = KM_NOSLEEP;
2205 2198 } else {
2206 2199 kmem_flag = KM_SLEEP;
2207 2200 }
2208 2201
2209 2202 additional_size = (additional_size + 7) & (~7);
2210 2203 stmf_size = stmf_sizes[struct_id].shared +
2211 2204 stmf_sizes[struct_id].fw_private + additional_size;
2212 2205
2213 2206 if (flags & AF_DONTZERO)
2214 2207 sh = (__stmf_t *)kmem_alloc(stmf_size, kmem_flag);
2215 2208 else
2216 2209 sh = (__stmf_t *)kmem_zalloc(stmf_size, kmem_flag);
2217 2210
2218 2211 if (sh == NULL)
2219 2212 return (NULL);
2220 2213
2221 2214 /*
2222 2215 * In principle, the implementation inside stmf_alloc should not
2223 2216 * be changed anyway. But the original order of framework private
2224 2217 * data and caller private data does not support sglist in the caller
2225 2218 * private data.
2226 2219 * To work around this, the memory segments of framework private
2227 2220 * data and caller private data are re-ordered here.
2228 2221 * A better solution is to provide a specific interface to allocate
2229 2222 * the sglist, then we will not need this workaround any more.
2230 2223 * But before the new interface is available, the memory segment
2231 2224 * ordering should be kept as is.
2232 2225 */
2233 2226 sh->cp = GET_BYTE_OFFSET(sh, stmf_sizes[struct_id].shared);
2234 2227 sh->fp = (__istmf_t *)GET_BYTE_OFFSET(sh,
2235 2228 stmf_sizes[struct_id].shared + additional_size);
2236 2229
2237 2230 sh->fp->bp = sh;
2238 2231 /* Just store the total size instead of storing additional size */
2239 2232 sh->fp->alloc_size = stmf_size;
2240 2233
2241 2234 return (sh);
2242 2235 }
2243 2236
2244 2237 void
2245 2238 stmf_free(void *ptr)
2246 2239 {
2247 2240 __stmf_t *sh = (__stmf_t *)ptr;
2248 2241
2249 2242 /*
2250 2243 * So far we dont need any struct specific processing. If such
2251 2244 * a need ever arises, then store the struct id in the framework
2252 2245 * private section and get it here as sh->fp->struct_id.
2253 2246 */
2254 2247 kmem_free(ptr, sh->fp->alloc_size);
2255 2248 }
2256 2249
2257 2250 /*
2258 2251 * Given a pointer to stmf_lu_t, verifies if this lu is registered with the
2259 2252 * framework and returns a pointer to framework private data for the lu.
2260 2253 * Returns NULL if the lu was not found.
2261 2254 */
2262 2255 stmf_i_lu_t *
2263 2256 stmf_lookup_lu(stmf_lu_t *lu)
2264 2257 {
2265 2258 stmf_i_lu_t *ilu;
2266 2259 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2267 2260
2268 2261 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2269 2262 if (ilu->ilu_lu == lu)
2270 2263 return (ilu);
2271 2264 }
2272 2265 return (NULL);
2273 2266 }
2274 2267
2275 2268 /*
2276 2269 * Given a pointer to stmf_local_port_t, verifies if this lport is registered
2277 2270 * with the framework and returns a pointer to framework private data for
2278 2271 * the lport.
2279 2272 * Returns NULL if the lport was not found.
2280 2273 */
2281 2274 stmf_i_local_port_t *
2282 2275 stmf_lookup_lport(stmf_local_port_t *lport)
2283 2276 {
2284 2277 stmf_i_local_port_t *ilport;
2285 2278 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2286 2279
2287 2280 for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
2288 2281 ilport = ilport->ilport_next) {
2289 2282 if (ilport->ilport_lport == lport)
2290 2283 return (ilport);
2291 2284 }
2292 2285 return (NULL);
2293 2286 }
2294 2287
2295 2288 stmf_status_t
2296 2289 stmf_register_lu_provider(stmf_lu_provider_t *lp)
2297 2290 {
2298 2291 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2299 2292 stmf_pp_data_t *ppd;
2300 2293 uint32_t cb_flags;
2301 2294
2302 2295 if (lp->lp_lpif_rev != LPIF_REV_1 && lp->lp_lpif_rev != LPIF_REV_2)
2303 2296 return (STMF_FAILURE);
2304 2297
2305 2298 mutex_enter(&stmf_state.stmf_lock);
2306 2299 ilp->ilp_next = stmf_state.stmf_ilplist;
2307 2300 stmf_state.stmf_ilplist = ilp;
2308 2301 stmf_state.stmf_nlps++;
2309 2302
2310 2303 /* See if we need to do a callback */
2311 2304 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2312 2305 if (strcmp(ppd->ppd_name, lp->lp_name) == 0) {
2313 2306 break;
2314 2307 }
2315 2308 }
2316 2309 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2317 2310 goto rlp_bail_out;
2318 2311 }
2319 2312 ilp->ilp_ppd = ppd;
2320 2313 ppd->ppd_provider = ilp;
2321 2314 if (lp->lp_cb == NULL)
2322 2315 goto rlp_bail_out;
2323 2316 ilp->ilp_cb_in_progress = 1;
2324 2317 cb_flags = STMF_PCB_PREG_COMPLETE;
2325 2318 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2326 2319 cb_flags |= STMF_PCB_STMF_ONLINING;
2327 2320 mutex_exit(&stmf_state.stmf_lock);
2328 2321 lp->lp_cb(lp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2329 2322 mutex_enter(&stmf_state.stmf_lock);
2330 2323 ilp->ilp_cb_in_progress = 0;
2331 2324
2332 2325 rlp_bail_out:
2333 2326 mutex_exit(&stmf_state.stmf_lock);
2334 2327
2335 2328 return (STMF_SUCCESS);
2336 2329 }
2337 2330
2338 2331 stmf_status_t
2339 2332 stmf_deregister_lu_provider(stmf_lu_provider_t *lp)
2340 2333 {
2341 2334 stmf_i_lu_provider_t **ppilp;
2342 2335 stmf_i_lu_provider_t *ilp = (stmf_i_lu_provider_t *)lp->lp_stmf_private;
2343 2336
2344 2337 mutex_enter(&stmf_state.stmf_lock);
2345 2338 if (ilp->ilp_nlus || ilp->ilp_cb_in_progress) {
2346 2339 mutex_exit(&stmf_state.stmf_lock);
2347 2340 return (STMF_BUSY);
2348 2341 }
2349 2342 for (ppilp = &stmf_state.stmf_ilplist; *ppilp != NULL;
2350 2343 ppilp = &((*ppilp)->ilp_next)) {
2351 2344 if (*ppilp == ilp) {
2352 2345 *ppilp = ilp->ilp_next;
2353 2346 stmf_state.stmf_nlps--;
2354 2347 if (ilp->ilp_ppd) {
2355 2348 ilp->ilp_ppd->ppd_provider = NULL;
2356 2349 ilp->ilp_ppd = NULL;
2357 2350 }
2358 2351 mutex_exit(&stmf_state.stmf_lock);
2359 2352 return (STMF_SUCCESS);
2360 2353 }
2361 2354 }
2362 2355 mutex_exit(&stmf_state.stmf_lock);
2363 2356 return (STMF_NOT_FOUND);
2364 2357 }
2365 2358
2366 2359 stmf_status_t
2367 2360 stmf_register_port_provider(stmf_port_provider_t *pp)
2368 2361 {
2369 2362 stmf_i_port_provider_t *ipp =
2370 2363 (stmf_i_port_provider_t *)pp->pp_stmf_private;
2371 2364 stmf_pp_data_t *ppd;
2372 2365 uint32_t cb_flags;
2373 2366
2374 2367 if (pp->pp_portif_rev != PORTIF_REV_1)
2375 2368 return (STMF_FAILURE);
2376 2369
2377 2370 mutex_enter(&stmf_state.stmf_lock);
2378 2371 ipp->ipp_next = stmf_state.stmf_ipplist;
2379 2372 stmf_state.stmf_ipplist = ipp;
2380 2373 stmf_state.stmf_npps++;
2381 2374 /* See if we need to do a callback */
2382 2375 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2383 2376 if (strcmp(ppd->ppd_name, pp->pp_name) == 0) {
2384 2377 break;
2385 2378 }
2386 2379 }
2387 2380 if ((ppd == NULL) || (ppd->ppd_nv == NULL)) {
2388 2381 goto rpp_bail_out;
2389 2382 }
2390 2383 ipp->ipp_ppd = ppd;
2391 2384 ppd->ppd_provider = ipp;
2392 2385 if (pp->pp_cb == NULL)
2393 2386 goto rpp_bail_out;
2394 2387 ipp->ipp_cb_in_progress = 1;
2395 2388 cb_flags = STMF_PCB_PREG_COMPLETE;
2396 2389 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2397 2390 cb_flags |= STMF_PCB_STMF_ONLINING;
2398 2391 mutex_exit(&stmf_state.stmf_lock);
2399 2392 pp->pp_cb(pp, STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2400 2393 mutex_enter(&stmf_state.stmf_lock);
2401 2394 ipp->ipp_cb_in_progress = 0;
2402 2395
2403 2396 rpp_bail_out:
2404 2397 mutex_exit(&stmf_state.stmf_lock);
2405 2398
2406 2399 return (STMF_SUCCESS);
2407 2400 }
2408 2401
2409 2402 stmf_status_t
2410 2403 stmf_deregister_port_provider(stmf_port_provider_t *pp)
2411 2404 {
2412 2405 stmf_i_port_provider_t *ipp =
2413 2406 (stmf_i_port_provider_t *)pp->pp_stmf_private;
2414 2407 stmf_i_port_provider_t **ppipp;
2415 2408
2416 2409 mutex_enter(&stmf_state.stmf_lock);
2417 2410 if (ipp->ipp_npps || ipp->ipp_cb_in_progress) {
2418 2411 mutex_exit(&stmf_state.stmf_lock);
2419 2412 return (STMF_BUSY);
2420 2413 }
2421 2414 for (ppipp = &stmf_state.stmf_ipplist; *ppipp != NULL;
2422 2415 ppipp = &((*ppipp)->ipp_next)) {
2423 2416 if (*ppipp == ipp) {
2424 2417 *ppipp = ipp->ipp_next;
2425 2418 stmf_state.stmf_npps--;
2426 2419 if (ipp->ipp_ppd) {
2427 2420 ipp->ipp_ppd->ppd_provider = NULL;
2428 2421 ipp->ipp_ppd = NULL;
2429 2422 }
2430 2423 mutex_exit(&stmf_state.stmf_lock);
2431 2424 return (STMF_SUCCESS);
2432 2425 }
2433 2426 }
2434 2427 mutex_exit(&stmf_state.stmf_lock);
2435 2428 return (STMF_NOT_FOUND);
2436 2429 }
2437 2430
2438 2431 int
2439 2432 stmf_load_ppd_ioctl(stmf_ppioctl_data_t *ppi, uint64_t *ppi_token,
2440 2433 uint32_t *err_ret)
2441 2434 {
2442 2435 stmf_i_port_provider_t *ipp;
2443 2436 stmf_i_lu_provider_t *ilp;
2444 2437 stmf_pp_data_t *ppd;
2445 2438 nvlist_t *nv;
2446 2439 int s;
2447 2440 int ret;
2448 2441
2449 2442 *err_ret = 0;
2450 2443
2451 2444 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2452 2445 return (EINVAL);
2453 2446 }
2454 2447
2455 2448 mutex_enter(&stmf_state.stmf_lock);
2456 2449 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2457 2450 if (ppi->ppi_lu_provider) {
2458 2451 if (!ppd->ppd_lu_provider)
2459 2452 continue;
2460 2453 } else if (ppi->ppi_port_provider) {
2461 2454 if (!ppd->ppd_port_provider)
2462 2455 continue;
2463 2456 }
2464 2457 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2465 2458 break;
2466 2459 }
2467 2460
2468 2461 if (ppd == NULL) {
2469 2462 /* New provider */
2470 2463 s = strlen(ppi->ppi_name);
2471 2464 if (s > 254) {
2472 2465 mutex_exit(&stmf_state.stmf_lock);
2473 2466 return (EINVAL);
2474 2467 }
2475 2468 s += sizeof (stmf_pp_data_t) - 7;
2476 2469
2477 2470 ppd = kmem_zalloc(s, KM_NOSLEEP);
2478 2471 if (ppd == NULL) {
2479 2472 mutex_exit(&stmf_state.stmf_lock);
2480 2473 return (ENOMEM);
2481 2474 }
2482 2475 ppd->ppd_alloc_size = s;
2483 2476 (void) strcpy(ppd->ppd_name, ppi->ppi_name);
2484 2477
2485 2478 /* See if this provider already exists */
2486 2479 if (ppi->ppi_lu_provider) {
2487 2480 ppd->ppd_lu_provider = 1;
2488 2481 for (ilp = stmf_state.stmf_ilplist; ilp != NULL;
2489 2482 ilp = ilp->ilp_next) {
2490 2483 if (strcmp(ppi->ppi_name,
2491 2484 ilp->ilp_lp->lp_name) == 0) {
2492 2485 ppd->ppd_provider = ilp;
2493 2486 ilp->ilp_ppd = ppd;
2494 2487 break;
2495 2488 }
2496 2489 }
2497 2490 } else {
2498 2491 ppd->ppd_port_provider = 1;
2499 2492 for (ipp = stmf_state.stmf_ipplist; ipp != NULL;
2500 2493 ipp = ipp->ipp_next) {
2501 2494 if (strcmp(ppi->ppi_name,
2502 2495 ipp->ipp_pp->pp_name) == 0) {
2503 2496 ppd->ppd_provider = ipp;
2504 2497 ipp->ipp_ppd = ppd;
2505 2498 break;
2506 2499 }
2507 2500 }
2508 2501 }
2509 2502
2510 2503 /* Link this ppd in */
2511 2504 ppd->ppd_next = stmf_state.stmf_ppdlist;
2512 2505 stmf_state.stmf_ppdlist = ppd;
2513 2506 }
2514 2507
2515 2508 /*
2516 2509 * User is requesting that the token be checked.
2517 2510 * If there was another set after the user's get
2518 2511 * it's an error
2519 2512 */
2520 2513 if (ppi->ppi_token_valid) {
2521 2514 if (ppi->ppi_token != ppd->ppd_token) {
2522 2515 *err_ret = STMF_IOCERR_PPD_UPDATED;
2523 2516 mutex_exit(&stmf_state.stmf_lock);
2524 2517 return (EINVAL);
2525 2518 }
2526 2519 }
2527 2520
2528 2521 if ((ret = nvlist_unpack((char *)ppi->ppi_data,
2529 2522 (size_t)ppi->ppi_data_size, &nv, KM_NOSLEEP)) != 0) {
2530 2523 mutex_exit(&stmf_state.stmf_lock);
2531 2524 return (ret);
2532 2525 }
2533 2526
2534 2527 /* Free any existing lists and add this one to the ppd */
2535 2528 if (ppd->ppd_nv)
2536 2529 nvlist_free(ppd->ppd_nv);
2537 2530 ppd->ppd_nv = nv;
2538 2531
2539 2532 /* set the token for writes */
2540 2533 ppd->ppd_token++;
2541 2534 /* return token to caller */
2542 2535 if (ppi_token) {
2543 2536 *ppi_token = ppd->ppd_token;
2544 2537 }
2545 2538
2546 2539 /* If there is a provider registered, do the notifications */
2547 2540 if (ppd->ppd_provider) {
2548 2541 uint32_t cb_flags = 0;
2549 2542
2550 2543 if (stmf_state.stmf_config_state == STMF_CONFIG_INIT)
2551 2544 cb_flags |= STMF_PCB_STMF_ONLINING;
2552 2545 if (ppi->ppi_lu_provider) {
2553 2546 ilp = (stmf_i_lu_provider_t *)ppd->ppd_provider;
2554 2547 if (ilp->ilp_lp->lp_cb == NULL)
2555 2548 goto bail_out;
2556 2549 ilp->ilp_cb_in_progress = 1;
2557 2550 mutex_exit(&stmf_state.stmf_lock);
2558 2551 ilp->ilp_lp->lp_cb(ilp->ilp_lp,
2559 2552 STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2560 2553 mutex_enter(&stmf_state.stmf_lock);
2561 2554 ilp->ilp_cb_in_progress = 0;
2562 2555 } else {
2563 2556 ipp = (stmf_i_port_provider_t *)ppd->ppd_provider;
2564 2557 if (ipp->ipp_pp->pp_cb == NULL)
2565 2558 goto bail_out;
2566 2559 ipp->ipp_cb_in_progress = 1;
2567 2560 mutex_exit(&stmf_state.stmf_lock);
2568 2561 ipp->ipp_pp->pp_cb(ipp->ipp_pp,
2569 2562 STMF_PROVIDER_DATA_UPDATED, ppd->ppd_nv, cb_flags);
2570 2563 mutex_enter(&stmf_state.stmf_lock);
2571 2564 ipp->ipp_cb_in_progress = 0;
2572 2565 }
2573 2566 }
2574 2567
2575 2568 bail_out:
2576 2569 mutex_exit(&stmf_state.stmf_lock);
2577 2570
2578 2571 return (0);
2579 2572 }
2580 2573
2581 2574 void
2582 2575 stmf_delete_ppd(stmf_pp_data_t *ppd)
2583 2576 {
2584 2577 stmf_pp_data_t **pppd;
2585 2578
2586 2579 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2587 2580 if (ppd->ppd_provider) {
2588 2581 if (ppd->ppd_lu_provider) {
2589 2582 ((stmf_i_lu_provider_t *)
2590 2583 ppd->ppd_provider)->ilp_ppd = NULL;
2591 2584 } else {
2592 2585 ((stmf_i_port_provider_t *)
2593 2586 ppd->ppd_provider)->ipp_ppd = NULL;
2594 2587 }
2595 2588 ppd->ppd_provider = NULL;
2596 2589 }
2597 2590
2598 2591 for (pppd = &stmf_state.stmf_ppdlist; *pppd != NULL;
2599 2592 pppd = &((*pppd)->ppd_next)) {
2600 2593 if (*pppd == ppd)
2601 2594 break;
2602 2595 }
2603 2596
2604 2597 if (*pppd == NULL)
2605 2598 return;
2606 2599
2607 2600 *pppd = ppd->ppd_next;
2608 2601 if (ppd->ppd_nv)
2609 2602 nvlist_free(ppd->ppd_nv);
2610 2603
2611 2604 kmem_free(ppd, ppd->ppd_alloc_size);
2612 2605 }
2613 2606
2614 2607 int
2615 2608 stmf_delete_ppd_ioctl(stmf_ppioctl_data_t *ppi)
2616 2609 {
2617 2610 stmf_pp_data_t *ppd;
2618 2611 int ret = ENOENT;
2619 2612
2620 2613 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2621 2614 return (EINVAL);
2622 2615 }
2623 2616
2624 2617 mutex_enter(&stmf_state.stmf_lock);
2625 2618
2626 2619 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2627 2620 if (ppi->ppi_lu_provider) {
2628 2621 if (!ppd->ppd_lu_provider)
2629 2622 continue;
2630 2623 } else if (ppi->ppi_port_provider) {
2631 2624 if (!ppd->ppd_port_provider)
2632 2625 continue;
2633 2626 }
2634 2627 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2635 2628 break;
2636 2629 }
2637 2630
2638 2631 if (ppd) {
2639 2632 ret = 0;
2640 2633 stmf_delete_ppd(ppd);
2641 2634 }
2642 2635 mutex_exit(&stmf_state.stmf_lock);
2643 2636
2644 2637 return (ret);
2645 2638 }
2646 2639
2647 2640 int
2648 2641 stmf_get_ppd_ioctl(stmf_ppioctl_data_t *ppi, stmf_ppioctl_data_t *ppi_out,
2649 2642 uint32_t *err_ret)
2650 2643 {
2651 2644 stmf_pp_data_t *ppd;
2652 2645 size_t req_size;
2653 2646 int ret = ENOENT;
2654 2647 char *bufp = (char *)ppi_out->ppi_data;
2655 2648
2656 2649 if ((ppi->ppi_lu_provider + ppi->ppi_port_provider) != 1) {
2657 2650 return (EINVAL);
2658 2651 }
2659 2652
2660 2653 mutex_enter(&stmf_state.stmf_lock);
2661 2654
2662 2655 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = ppd->ppd_next) {
2663 2656 if (ppi->ppi_lu_provider) {
2664 2657 if (!ppd->ppd_lu_provider)
2665 2658 continue;
2666 2659 } else if (ppi->ppi_port_provider) {
2667 2660 if (!ppd->ppd_port_provider)
2668 2661 continue;
2669 2662 }
2670 2663 if (strncmp(ppi->ppi_name, ppd->ppd_name, 254) == 0)
2671 2664 break;
2672 2665 }
2673 2666
2674 2667 if (ppd && ppd->ppd_nv) {
2675 2668 ppi_out->ppi_token = ppd->ppd_token;
2676 2669 if ((ret = nvlist_size(ppd->ppd_nv, &req_size,
2677 2670 NV_ENCODE_XDR)) != 0) {
2678 2671 goto done;
2679 2672 }
2680 2673 ppi_out->ppi_data_size = req_size;
2681 2674 if (req_size > ppi->ppi_data_size) {
2682 2675 *err_ret = STMF_IOCERR_INSUFFICIENT_BUF;
2683 2676 ret = EINVAL;
2684 2677 goto done;
2685 2678 }
2686 2679
2687 2680 if ((ret = nvlist_pack(ppd->ppd_nv, &bufp, &req_size,
2688 2681 NV_ENCODE_XDR, 0)) != 0) {
2689 2682 goto done;
2690 2683 }
2691 2684 ret = 0;
2692 2685 }
2693 2686
2694 2687 done:
2695 2688 mutex_exit(&stmf_state.stmf_lock);
2696 2689
2697 2690 return (ret);
2698 2691 }
2699 2692
2700 2693 void
2701 2694 stmf_delete_all_ppds()
2702 2695 {
2703 2696 stmf_pp_data_t *ppd, *nppd;
2704 2697
2705 2698 ASSERT(mutex_owned(&stmf_state.stmf_lock));
2706 2699 for (ppd = stmf_state.stmf_ppdlist; ppd != NULL; ppd = nppd) {
2707 2700 nppd = ppd->ppd_next;
2708 2701 stmf_delete_ppd(ppd);
2709 2702 }
2710 2703 }
2711 2704
2712 2705 /*
2713 2706 * 16 is the max string length of a protocol_ident, increase
2714 2707 * the size if needed.
2715 2708 */
2716 2709 #define STMF_KSTAT_LU_SZ (STMF_GUID_INPUT + 1 + 256)
2717 2710 #define STMF_KSTAT_TGT_SZ (256 * 2 + 16)
2718 2711
2719 2712 /*
2720 2713 * This array matches the Protocol Identifier in stmf_ioctl.h
2721 2714 */
2722 2715 #define MAX_PROTO_STR_LEN 32
2723 2716
2724 2717 char *protocol_ident[PROTOCOL_ANY] = {
2725 2718 "Fibre Channel",
2726 2719 "Parallel SCSI",
2727 2720 "SSA",
2728 2721 "IEEE_1394",
2729 2722 "SRP",
2730 2723 "iSCSI",
2731 2724 "SAS",
2732 2725 "ADT",
2733 2726 "ATAPI",
2734 2727 "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN", "UNKNOWN"
2735 2728 };
2736 2729
2737 2730 /*
2738 2731 * Update the lun wait/run queue count
2739 2732 */
2740 2733 static void
2741 2734 stmf_update_kstat_lu_q(scsi_task_t *task, void func())
2742 2735 {
2743 2736 stmf_i_lu_t *ilu;
2744 2737 kstat_io_t *kip;
2745 2738
2746 2739 if (task->task_lu == dlun0)
2747 2740 return;
2748 2741 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2749 2742 if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2750 2743 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2751 2744 if (kip != NULL) {
2752 2745 func(kip);
2753 2746 }
2754 2747 }
2755 2748 }
2756 2749
2757 2750 /*
2758 2751 * Update the target(lport) wait/run queue count
2759 2752 */
2760 2753 static void
2761 2754 stmf_update_kstat_lport_q(scsi_task_t *task, void func())
2762 2755 {
2763 2756 stmf_i_local_port_t *ilp;
2764 2757 kstat_io_t *kip;
2765 2758
2766 2759 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2767 2760 if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2768 2761 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2769 2762 if (kip != NULL) {
2770 2763 mutex_enter(ilp->ilport_kstat_io->ks_lock);
2771 2764 func(kip);
2772 2765 mutex_exit(ilp->ilport_kstat_io->ks_lock);
2773 2766 }
2774 2767 }
2775 2768 }
2776 2769
2777 2770 static void
2778 2771 stmf_update_kstat_lport_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2779 2772 {
2780 2773 stmf_i_local_port_t *ilp;
2781 2774 kstat_io_t *kip;
2782 2775
2783 2776 ilp = (stmf_i_local_port_t *)task->task_lport->lport_stmf_private;
2784 2777 if (ilp != NULL && ilp->ilport_kstat_io != NULL) {
2785 2778 kip = KSTAT_IO_PTR(ilp->ilport_kstat_io);
2786 2779 if (kip != NULL) {
2787 2780 mutex_enter(ilp->ilport_kstat_io->ks_lock);
2788 2781 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2789 2782 mutex_exit(ilp->ilport_kstat_io->ks_lock);
2790 2783 }
2791 2784 }
2792 2785 }
2793 2786
2794 2787 static void
2795 2788 stmf_update_kstat_lu_io(scsi_task_t *task, stmf_data_buf_t *dbuf)
2796 2789 {
2797 2790 stmf_i_lu_t *ilu;
2798 2791 kstat_io_t *kip;
2799 2792
2800 2793 ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
2801 2794 if (ilu != NULL && ilu->ilu_kstat_io != NULL) {
2802 2795 kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
2803 2796 if (kip != NULL) {
2804 2797 mutex_enter(ilu->ilu_kstat_io->ks_lock);
2805 2798 STMF_UPDATE_KSTAT_IO(kip, dbuf);
2806 2799 mutex_exit(ilu->ilu_kstat_io->ks_lock);
2807 2800 }
2808 2801 }
2809 2802 }
2810 2803
2811 2804 static void
2812 2805 stmf_create_kstat_lu(stmf_i_lu_t *ilu)
2813 2806 {
2814 2807 char ks_nm[KSTAT_STRLEN];
2815 2808 stmf_kstat_lu_info_t *ks_lu;
2816 2809
2817 2810 /* create kstat lun info */
2818 2811 ks_lu = (stmf_kstat_lu_info_t *)kmem_zalloc(STMF_KSTAT_LU_SZ,
2819 2812 KM_NOSLEEP);
2820 2813 if (ks_lu == NULL) {
2821 2814 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2822 2815 return;
2823 2816 }
2824 2817
2825 2818 bzero(ks_nm, sizeof (ks_nm));
2826 2819 (void) sprintf(ks_nm, "stmf_lu_%"PRIxPTR"", (uintptr_t)ilu);
2827 2820 if ((ilu->ilu_kstat_info = kstat_create(STMF_MODULE_NAME, 0,
2828 2821 ks_nm, "misc", KSTAT_TYPE_NAMED,
2829 2822 sizeof (stmf_kstat_lu_info_t) / sizeof (kstat_named_t),
2830 2823 KSTAT_FLAG_VIRTUAL)) == NULL) {
2831 2824 kmem_free(ks_lu, STMF_KSTAT_LU_SZ);
2832 2825 cmn_err(CE_WARN, "STMF: kstat_create lu failed");
2833 2826 return;
2834 2827 }
2835 2828
2836 2829 ilu->ilu_kstat_info->ks_data_size = STMF_KSTAT_LU_SZ;
2837 2830 ilu->ilu_kstat_info->ks_data = ks_lu;
2838 2831
2839 2832 kstat_named_init(&ks_lu->i_lun_guid, "lun-guid",
2840 2833 KSTAT_DATA_STRING);
2841 2834 kstat_named_init(&ks_lu->i_lun_alias, "lun-alias",
2842 2835 KSTAT_DATA_STRING);
2843 2836
2844 2837 /* convert guid to hex string */
2845 2838 int i;
2846 2839 uint8_t *p = ilu->ilu_lu->lu_id->ident;
2847 2840 bzero(ilu->ilu_ascii_hex_guid, sizeof (ilu->ilu_ascii_hex_guid));
2848 2841 for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
2849 2842 (void) sprintf(&ilu->ilu_ascii_hex_guid[i * 2], "%02x", p[i]);
2850 2843 }
2851 2844 kstat_named_setstr(&ks_lu->i_lun_guid,
2852 2845 (const char *)ilu->ilu_ascii_hex_guid);
2853 2846 kstat_named_setstr(&ks_lu->i_lun_alias,
2854 2847 (const char *)ilu->ilu_lu->lu_alias);
2855 2848 kstat_install(ilu->ilu_kstat_info);
2856 2849
2857 2850 /* create kstat lun io */
2858 2851 bzero(ks_nm, sizeof (ks_nm));
2859 2852 (void) sprintf(ks_nm, "stmf_lu_io_%"PRIxPTR"", (uintptr_t)ilu);
2860 2853 if ((ilu->ilu_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2861 2854 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2862 2855 cmn_err(CE_WARN, "STMF: kstat_create lu_io failed");
2863 2856 return;
2864 2857 }
2865 2858 mutex_init(&ilu->ilu_kstat_lock, NULL, MUTEX_DRIVER, 0);
2866 2859 ilu->ilu_kstat_io->ks_lock = &ilu->ilu_kstat_lock;
2867 2860 kstat_install(ilu->ilu_kstat_io);
2868 2861 }
2869 2862
2870 2863 static void
2871 2864 stmf_create_kstat_lport(stmf_i_local_port_t *ilport)
2872 2865 {
2873 2866 char ks_nm[KSTAT_STRLEN];
2874 2867 stmf_kstat_tgt_info_t *ks_tgt;
2875 2868 int id, len;
2876 2869
2877 2870 /* create kstat lport info */
2878 2871 ks_tgt = (stmf_kstat_tgt_info_t *)kmem_zalloc(STMF_KSTAT_TGT_SZ,
2879 2872 KM_NOSLEEP);
2880 2873 if (ks_tgt == NULL) {
2881 2874 cmn_err(CE_WARN, "STMF: kmem_zalloc failed");
2882 2875 return;
2883 2876 }
2884 2877
2885 2878 bzero(ks_nm, sizeof (ks_nm));
2886 2879 (void) sprintf(ks_nm, "stmf_tgt_%"PRIxPTR"", (uintptr_t)ilport);
2887 2880 if ((ilport->ilport_kstat_info = kstat_create(STMF_MODULE_NAME,
2888 2881 0, ks_nm, "misc", KSTAT_TYPE_NAMED,
2889 2882 sizeof (stmf_kstat_tgt_info_t) / sizeof (kstat_named_t),
2890 2883 KSTAT_FLAG_VIRTUAL)) == NULL) {
2891 2884 kmem_free(ks_tgt, STMF_KSTAT_TGT_SZ);
2892 2885 cmn_err(CE_WARN, "STMF: kstat_create target failed");
2893 2886 return;
2894 2887 }
2895 2888
2896 2889 ilport->ilport_kstat_info->ks_data_size = STMF_KSTAT_TGT_SZ;
2897 2890 ilport->ilport_kstat_info->ks_data = ks_tgt;
2898 2891
2899 2892 kstat_named_init(&ks_tgt->i_tgt_name, "target-name",
2900 2893 KSTAT_DATA_STRING);
2901 2894 kstat_named_init(&ks_tgt->i_tgt_alias, "target-alias",
2902 2895 KSTAT_DATA_STRING);
2903 2896 kstat_named_init(&ks_tgt->i_protocol, "protocol",
2904 2897 KSTAT_DATA_STRING);
2905 2898
2906 2899 /* ident might not be null terminated */
2907 2900 len = ilport->ilport_lport->lport_id->ident_length;
2908 2901 bcopy(ilport->ilport_lport->lport_id->ident,
2909 2902 ilport->ilport_kstat_tgt_name, len);
2910 2903 ilport->ilport_kstat_tgt_name[len + 1] = NULL;
2911 2904 kstat_named_setstr(&ks_tgt->i_tgt_name,
2912 2905 (const char *)ilport->ilport_kstat_tgt_name);
2913 2906 kstat_named_setstr(&ks_tgt->i_tgt_alias,
2914 2907 (const char *)ilport->ilport_lport->lport_alias);
2915 2908 /* protocol */
2916 2909 if ((id = ilport->ilport_lport->lport_id->protocol_id) > PROTOCOL_ANY) {
2917 2910 cmn_err(CE_WARN, "STMF: protocol_id out of bound");
2918 2911 id = PROTOCOL_ANY;
2919 2912 }
2920 2913 kstat_named_setstr(&ks_tgt->i_protocol, protocol_ident[id]);
2921 2914 kstat_install(ilport->ilport_kstat_info);
2922 2915
2923 2916 /* create kstat lport io */
2924 2917 bzero(ks_nm, sizeof (ks_nm));
2925 2918 (void) sprintf(ks_nm, "stmf_tgt_io_%"PRIxPTR"", (uintptr_t)ilport);
2926 2919 if ((ilport->ilport_kstat_io = kstat_create(STMF_MODULE_NAME, 0,
2927 2920 ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
2928 2921 cmn_err(CE_WARN, "STMF: kstat_create target_io failed");
2929 2922 return;
2930 2923 }
2931 2924 mutex_init(&ilport->ilport_kstat_lock, NULL, MUTEX_DRIVER, 0);
2932 2925 ilport->ilport_kstat_io->ks_lock = &ilport->ilport_kstat_lock;
2933 2926 kstat_install(ilport->ilport_kstat_io);
2934 2927 }
2935 2928
2936 2929 /*
2937 2930 * set the asymmetric access state for a logical unit
2938 2931 * caller is responsible for establishing SCSI unit attention on
2939 2932 * state change
2940 2933 */
2941 2934 stmf_status_t
2942 2935 stmf_set_lu_access(stmf_lu_t *lu, uint8_t access_state)
2943 2936 {
2944 2937 stmf_i_lu_t *ilu;
2945 2938 uint8_t *p1, *p2;
2946 2939
2947 2940 if ((access_state != STMF_LU_STANDBY) &&
2948 2941 (access_state != STMF_LU_ACTIVE)) {
2949 2942 return (STMF_INVALID_ARG);
2950 2943 }
2951 2944
2952 2945 p1 = &lu->lu_id->ident[0];
2953 2946 mutex_enter(&stmf_state.stmf_lock);
2954 2947 if (stmf_state.stmf_inventory_locked) {
2955 2948 mutex_exit(&stmf_state.stmf_lock);
2956 2949 return (STMF_BUSY);
2957 2950 }
2958 2951
2959 2952 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
2960 2953 p2 = &ilu->ilu_lu->lu_id->ident[0];
2961 2954 if (bcmp(p1, p2, 16) == 0) {
2962 2955 break;
2963 2956 }
2964 2957 }
2965 2958
2966 2959 if (!ilu) {
2967 2960 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
2968 2961 } else {
2969 2962 /*
2970 2963 * We're changing access state on an existing logical unit
2971 2964 * Send the proxy registration message for this logical unit
2972 2965 * if we're in alua mode.
2973 2966 * If the requested state is STMF_LU_ACTIVE, we want to register
2974 2967 * this logical unit.
2975 2968 * If the requested state is STMF_LU_STANDBY, we're going to
2976 2969 * abort all tasks for this logical unit.
2977 2970 */
2978 2971 if (stmf_state.stmf_alua_state == 1 &&
2979 2972 access_state == STMF_LU_ACTIVE) {
2980 2973 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
2981 2974 stmf_ic_msg_t *ic_reg_lun;
2982 2975 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
2983 2976 lu->lu_lp->lp_alua_support) {
2984 2977 ilu->ilu_alua = 1;
2985 2978 /* allocate the register message */
2986 2979 ic_reg_lun = ic_lun_active_msg_alloc(p1,
2987 2980 lu->lu_lp->lp_name,
2988 2981 lu->lu_proxy_reg_arg_len,
2989 2982 (uint8_t *)lu->lu_proxy_reg_arg,
2990 2983 stmf_proxy_msg_id);
2991 2984 /* send the message */
2992 2985 if (ic_reg_lun) {
2993 2986 ic_ret = ic_tx_msg(ic_reg_lun);
2994 2987 if (ic_ret == STMF_IC_MSG_SUCCESS) {
2995 2988 stmf_proxy_msg_id++;
2996 2989 }
2997 2990 }
2998 2991 }
2999 2992 } else if (stmf_state.stmf_alua_state == 1 &&
3000 2993 access_state == STMF_LU_STANDBY) {
3001 2994 /* abort all tasks for this lu */
3002 2995 stmf_task_lu_killall(lu, NULL, STMF_ABORTED);
3003 2996 }
3004 2997 }
3005 2998
3006 2999 ilu->ilu_access = access_state;
3007 3000
3008 3001 mutex_exit(&stmf_state.stmf_lock);
3009 3002 return (STMF_SUCCESS);
3010 3003 }
3011 3004
3012 3005
3013 3006 stmf_status_t
3014 3007 stmf_register_lu(stmf_lu_t *lu)
3015 3008 {
3016 3009 stmf_i_lu_t *ilu;
3017 3010 uint8_t *p1, *p2;
3018 3011 stmf_state_change_info_t ssci;
3019 3012 stmf_id_data_t *luid;
3020 3013
3021 3014 if ((lu->lu_id->ident_type != ID_TYPE_NAA) ||
3022 3015 (lu->lu_id->ident_length != 16) ||
3023 3016 ((lu->lu_id->ident[0] & 0xf0) != 0x60)) {
3024 3017 return (STMF_INVALID_ARG);
3025 3018 }
3026 3019 p1 = &lu->lu_id->ident[0];
3027 3020 mutex_enter(&stmf_state.stmf_lock);
3028 3021 if (stmf_state.stmf_inventory_locked) {
3029 3022 mutex_exit(&stmf_state.stmf_lock);
3030 3023 return (STMF_BUSY);
3031 3024 }
3032 3025
3033 3026 for (ilu = stmf_state.stmf_ilulist; ilu != NULL; ilu = ilu->ilu_next) {
3034 3027 p2 = &ilu->ilu_lu->lu_id->ident[0];
3035 3028 if (bcmp(p1, p2, 16) == 0) {
3036 3029 mutex_exit(&stmf_state.stmf_lock);
3037 3030 return (STMF_ALREADY);
3038 3031 }
3039 3032 }
3040 3033
3041 3034 ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
3042 3035 luid = stmf_lookup_id(&stmf_state.stmf_luid_list,
3043 3036 lu->lu_id->ident_length, lu->lu_id->ident);
3044 3037 if (luid) {
3045 3038 luid->id_pt_to_object = (void *)ilu;
3046 3039 ilu->ilu_luid = luid;
3047 3040 }
3048 3041 ilu->ilu_alias = NULL;
3049 3042
3050 3043 ilu->ilu_next = stmf_state.stmf_ilulist;
3051 3044 ilu->ilu_prev = NULL;
|
↓ open down ↓ |
1447 lines elided |
↑ open up ↑ |
3052 3045 if (ilu->ilu_next)
3053 3046 ilu->ilu_next->ilu_prev = ilu;
3054 3047 stmf_state.stmf_ilulist = ilu;
3055 3048 stmf_state.stmf_nlus++;
3056 3049 if (lu->lu_lp) {
3057 3050 ((stmf_i_lu_provider_t *)
3058 3051 (lu->lu_lp->lp_stmf_private))->ilp_nlus++;
3059 3052 }
3060 3053 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
3061 3054 STMF_EVENT_ALLOC_HANDLE(ilu->ilu_event_hdl);
3055 + cv_init(&ilu->ilu_offline_pending_cv, NULL, CV_DRIVER, NULL);
3062 3056 stmf_create_kstat_lu(ilu);
3063 3057 /*
3064 3058 * register with proxy module if available and logical unit
3065 3059 * is in active state
3066 3060 */
3067 3061 if (stmf_state.stmf_alua_state == 1 &&
3068 3062 ilu->ilu_access == STMF_LU_ACTIVE) {
3069 3063 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3070 3064 stmf_ic_msg_t *ic_reg_lun;
3071 3065 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3072 3066 lu->lu_lp->lp_alua_support) {
3073 3067 ilu->ilu_alua = 1;
3074 3068 /* allocate the register message */
3075 3069 ic_reg_lun = ic_reg_lun_msg_alloc(p1,
3076 3070 lu->lu_lp->lp_name, lu->lu_proxy_reg_arg_len,
3077 3071 (uint8_t *)lu->lu_proxy_reg_arg, stmf_proxy_msg_id);
3078 3072 /* send the message */
3079 3073 if (ic_reg_lun) {
3080 3074 ic_ret = ic_tx_msg(ic_reg_lun);
3081 3075 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3082 3076 stmf_proxy_msg_id++;
3083 3077 }
3084 3078 }
3085 3079 }
3086 3080 }
3087 3081 mutex_exit(&stmf_state.stmf_lock);
3088 3082
3089 3083 /* check the default state for lu */
3090 3084 if (stmf_state.stmf_default_lu_state == STMF_STATE_OFFLINE) {
3091 3085 ilu->ilu_prev_state = STMF_STATE_OFFLINE;
3092 3086 } else {
3093 3087 ilu->ilu_prev_state = STMF_STATE_ONLINE;
3094 3088 if (stmf_state.stmf_service_running) {
3095 3089 ssci.st_rflags = 0;
3096 3090 ssci.st_additional_info = NULL;
3097 3091 (void) stmf_ctl(STMF_CMD_LU_ONLINE, lu, &ssci);
3098 3092 }
3099 3093 }
3100 3094
3101 3095 /* XXX: Generate event */
3102 3096 return (STMF_SUCCESS);
3103 3097 }
3104 3098
3105 3099 stmf_status_t
3106 3100 stmf_deregister_lu(stmf_lu_t *lu)
3107 3101 {
3108 3102 stmf_i_lu_t *ilu;
3109 3103
3110 3104 mutex_enter(&stmf_state.stmf_lock);
3111 3105 if (stmf_state.stmf_inventory_locked) {
3112 3106 mutex_exit(&stmf_state.stmf_lock);
3113 3107 return (STMF_BUSY);
3114 3108 }
3115 3109 ilu = stmf_lookup_lu(lu);
3116 3110 if (ilu == NULL) {
3117 3111 mutex_exit(&stmf_state.stmf_lock);
3118 3112 return (STMF_INVALID_ARG);
3119 3113 }
3120 3114 if (ilu->ilu_state == STMF_STATE_OFFLINE) {
3121 3115 ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
3122 3116 while (ilu->ilu_flags & ILU_STALL_DEREGISTER) {
3123 3117 cv_wait(&stmf_state.stmf_cv, &stmf_state.stmf_lock);
3124 3118 }
3125 3119 if (ilu->ilu_ntasks) {
3126 3120 stmf_i_scsi_task_t *itask, *nitask;
3127 3121
3128 3122 nitask = ilu->ilu_tasks;
3129 3123 do {
3130 3124 itask = nitask;
3131 3125 nitask = itask->itask_lu_next;
3132 3126 lu->lu_task_free(itask->itask_task);
3133 3127 stmf_free(itask->itask_task);
3134 3128 } while (nitask != NULL);
3135 3129
3136 3130 ilu->ilu_tasks = ilu->ilu_free_tasks = NULL;
3137 3131 ilu->ilu_ntasks = ilu->ilu_ntasks_free = 0;
3138 3132 }
3139 3133 /* de-register with proxy if available */
3140 3134 if (ilu->ilu_access == STMF_LU_ACTIVE &&
3141 3135 stmf_state.stmf_alua_state == 1) {
3142 3136 /* de-register with proxy module */
3143 3137 stmf_ic_msg_status_t ic_ret = STMF_IC_MSG_SUCCESS;
3144 3138 stmf_ic_msg_t *ic_dereg_lun;
3145 3139 if (lu->lu_lp && lu->lu_lp->lp_lpif_rev == LPIF_REV_2 &&
3146 3140 lu->lu_lp->lp_alua_support) {
3147 3141 ilu->ilu_alua = 1;
3148 3142 /* allocate the de-register message */
3149 3143 ic_dereg_lun = ic_dereg_lun_msg_alloc(
3150 3144 lu->lu_id->ident, lu->lu_lp->lp_name, 0,
3151 3145 NULL, stmf_proxy_msg_id);
3152 3146 /* send the message */
3153 3147 if (ic_dereg_lun) {
3154 3148 ic_ret = ic_tx_msg(ic_dereg_lun);
3155 3149 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3156 3150 stmf_proxy_msg_id++;
3157 3151 }
3158 3152 }
3159 3153 }
3160 3154 }
3161 3155
3162 3156 if (ilu->ilu_next)
3163 3157 ilu->ilu_next->ilu_prev = ilu->ilu_prev;
3164 3158 if (ilu->ilu_prev)
3165 3159 ilu->ilu_prev->ilu_next = ilu->ilu_next;
3166 3160 else
3167 3161 stmf_state.stmf_ilulist = ilu->ilu_next;
3168 3162 stmf_state.stmf_nlus--;
3169 3163
3170 3164 if (ilu == stmf_state.stmf_svc_ilu_draining) {
3171 3165 stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
3172 3166 }
3173 3167 if (ilu == stmf_state.stmf_svc_ilu_timing) {
3174 3168 stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
3175 3169 }
3176 3170 if (lu->lu_lp) {
3177 3171 ((stmf_i_lu_provider_t *)
3178 3172 (lu->lu_lp->lp_stmf_private))->ilp_nlus--;
3179 3173 }
3180 3174 if (ilu->ilu_luid) {
3181 3175 ((stmf_id_data_t *)ilu->ilu_luid)->id_pt_to_object =
3182 3176 NULL;
3183 3177 ilu->ilu_luid = NULL;
3184 3178 }
3185 3179 STMF_EVENT_FREE_HANDLE(ilu->ilu_event_hdl);
3186 3180 } else {
3187 3181 mutex_exit(&stmf_state.stmf_lock);
3188 3182 return (STMF_BUSY);
|
↓ open down ↓ |
117 lines elided |
↑ open up ↑ |
3189 3183 }
3190 3184 if (ilu->ilu_kstat_info) {
3191 3185 kmem_free(ilu->ilu_kstat_info->ks_data,
3192 3186 ilu->ilu_kstat_info->ks_data_size);
3193 3187 kstat_delete(ilu->ilu_kstat_info);
3194 3188 }
3195 3189 if (ilu->ilu_kstat_io) {
3196 3190 kstat_delete(ilu->ilu_kstat_io);
3197 3191 mutex_destroy(&ilu->ilu_kstat_lock);
3198 3192 }
3199 - stmf_delete_itl_kstat_by_guid(ilu->ilu_ascii_hex_guid);
3193 + cv_destroy(&ilu->ilu_offline_pending_cv);
3200 3194 mutex_exit(&stmf_state.stmf_lock);
3201 3195 return (STMF_SUCCESS);
3202 3196 }
3203 3197
3204 3198 void
3205 3199 stmf_set_port_standby(stmf_local_port_t *lport, uint16_t rtpid)
3206 3200 {
3207 3201 stmf_i_local_port_t *ilport =
3208 3202 (stmf_i_local_port_t *)lport->lport_stmf_private;
3209 3203 ilport->ilport_rtpid = rtpid;
3210 3204 ilport->ilport_standby = 1;
3211 3205 }
3212 3206
3213 3207 void
3214 3208 stmf_set_port_alua(stmf_local_port_t *lport)
3215 3209 {
3216 3210 stmf_i_local_port_t *ilport =
3217 3211 (stmf_i_local_port_t *)lport->lport_stmf_private;
3218 3212 ilport->ilport_alua = 1;
3219 3213 }
3220 3214
3221 3215 stmf_status_t
3222 3216 stmf_register_local_port(stmf_local_port_t *lport)
3223 3217 {
3224 3218 stmf_i_local_port_t *ilport;
3225 3219 stmf_state_change_info_t ssci;
3226 3220 int start_workers = 0;
3227 3221
3228 3222 mutex_enter(&stmf_state.stmf_lock);
3229 3223 if (stmf_state.stmf_inventory_locked) {
3230 3224 mutex_exit(&stmf_state.stmf_lock);
3231 3225 return (STMF_BUSY);
3232 3226 }
3233 3227 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3234 3228 rw_init(&ilport->ilport_lock, NULL, RW_DRIVER, NULL);
3235 3229
3236 3230 ilport->ilport_instance =
3237 3231 id_alloc_nosleep(stmf_state.stmf_ilport_inst_space);
3238 3232 if (ilport->ilport_instance == -1) {
3239 3233 mutex_exit(&stmf_state.stmf_lock);
3240 3234 return (STMF_FAILURE);
3241 3235 }
3242 3236 ilport->ilport_next = stmf_state.stmf_ilportlist;
3243 3237 ilport->ilport_prev = NULL;
3244 3238 if (ilport->ilport_next)
3245 3239 ilport->ilport_next->ilport_prev = ilport;
3246 3240 stmf_state.stmf_ilportlist = ilport;
3247 3241 stmf_state.stmf_nlports++;
3248 3242 if (lport->lport_pp) {
3249 3243 ((stmf_i_port_provider_t *)
3250 3244 (lport->lport_pp->pp_stmf_private))->ipp_npps++;
3251 3245 }
3252 3246 ilport->ilport_tg =
3253 3247 stmf_lookup_group_for_target(lport->lport_id->ident,
3254 3248 lport->lport_id->ident_length);
3255 3249
3256 3250 /*
3257 3251 * rtpid will/must be set if this is a standby port
3258 3252 * only register ports that are not standby (proxy) ports
3259 3253 * and ports that are alua participants (ilport_alua == 1)
3260 3254 */
3261 3255 if (ilport->ilport_standby == 0) {
3262 3256 ilport->ilport_rtpid = atomic_add_16_nv(&stmf_rtpid_counter, 1);
3263 3257 }
3264 3258
3265 3259 if (stmf_state.stmf_alua_state == 1 &&
3266 3260 ilport->ilport_standby == 0 &&
3267 3261 ilport->ilport_alua == 1) {
3268 3262 stmf_ic_msg_t *ic_reg_port;
3269 3263 stmf_ic_msg_status_t ic_ret;
3270 3264 stmf_local_port_t *lport;
3271 3265 lport = ilport->ilport_lport;
3272 3266 ic_reg_port = ic_reg_port_msg_alloc(
3273 3267 lport->lport_id, ilport->ilport_rtpid,
3274 3268 0, NULL, stmf_proxy_msg_id);
3275 3269 if (ic_reg_port) {
3276 3270 ic_ret = ic_tx_msg(ic_reg_port);
3277 3271 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3278 3272 ilport->ilport_reg_msgid = stmf_proxy_msg_id++;
3279 3273 } else {
3280 3274 cmn_err(CE_WARN, "error on port registration "
3281 3275 "port - %s", ilport->ilport_kstat_tgt_name);
3282 3276 }
3283 3277 }
3284 3278 }
3285 3279 STMF_EVENT_ALLOC_HANDLE(ilport->ilport_event_hdl);
3286 3280 stmf_create_kstat_lport(ilport);
3287 3281 if (stmf_workers_state == STMF_WORKERS_DISABLED) {
3288 3282 stmf_workers_state = STMF_WORKERS_ENABLING;
3289 3283 start_workers = 1;
3290 3284 }
3291 3285 mutex_exit(&stmf_state.stmf_lock);
3292 3286
3293 3287 if (start_workers)
3294 3288 stmf_worker_init();
3295 3289
3296 3290 /* the default state of LPORT */
3297 3291
3298 3292 if (stmf_state.stmf_default_lport_state == STMF_STATE_OFFLINE) {
3299 3293 ilport->ilport_prev_state = STMF_STATE_OFFLINE;
3300 3294 } else {
3301 3295 ilport->ilport_prev_state = STMF_STATE_ONLINE;
3302 3296 if (stmf_state.stmf_service_running) {
3303 3297 ssci.st_rflags = 0;
3304 3298 ssci.st_additional_info = NULL;
3305 3299 (void) stmf_ctl(STMF_CMD_LPORT_ONLINE, lport, &ssci);
3306 3300 }
3307 3301 }
3308 3302
3309 3303 /* XXX: Generate event */
3310 3304 return (STMF_SUCCESS);
3311 3305 }
3312 3306
3313 3307 stmf_status_t
3314 3308 stmf_deregister_local_port(stmf_local_port_t *lport)
3315 3309 {
3316 3310 stmf_i_local_port_t *ilport;
3317 3311
3318 3312 mutex_enter(&stmf_state.stmf_lock);
3319 3313 if (stmf_state.stmf_inventory_locked) {
3320 3314 mutex_exit(&stmf_state.stmf_lock);
3321 3315 return (STMF_BUSY);
3322 3316 }
3323 3317
3324 3318 /* dequeue all object requests from active queue */
3325 3319 stmf_svc_kill_obj_requests(lport);
3326 3320
3327 3321 ilport = (stmf_i_local_port_t *)lport->lport_stmf_private;
3328 3322
3329 3323 /*
3330 3324 * deregister ports that are not standby (proxy)
3331 3325 */
3332 3326 if (stmf_state.stmf_alua_state == 1 &&
3333 3327 ilport->ilport_standby == 0 &&
3334 3328 ilport->ilport_alua == 1) {
3335 3329 stmf_ic_msg_t *ic_dereg_port;
3336 3330 stmf_ic_msg_status_t ic_ret;
3337 3331 ic_dereg_port = ic_dereg_port_msg_alloc(
3338 3332 lport->lport_id, 0, NULL, stmf_proxy_msg_id);
3339 3333 if (ic_dereg_port) {
3340 3334 ic_ret = ic_tx_msg(ic_dereg_port);
3341 3335 if (ic_ret == STMF_IC_MSG_SUCCESS) {
3342 3336 stmf_proxy_msg_id++;
3343 3337 }
3344 3338 }
3345 3339 }
3346 3340
3347 3341 if (ilport->ilport_nsessions == 0) {
3348 3342 if (ilport->ilport_next)
3349 3343 ilport->ilport_next->ilport_prev = ilport->ilport_prev;
3350 3344 if (ilport->ilport_prev)
3351 3345 ilport->ilport_prev->ilport_next = ilport->ilport_next;
3352 3346 else
3353 3347 stmf_state.stmf_ilportlist = ilport->ilport_next;
3354 3348 id_free(stmf_state.stmf_ilport_inst_space,
3355 3349 ilport->ilport_instance);
3356 3350 rw_destroy(&ilport->ilport_lock);
3357 3351 stmf_state.stmf_nlports--;
3358 3352 if (lport->lport_pp) {
3359 3353 ((stmf_i_port_provider_t *)
3360 3354 (lport->lport_pp->pp_stmf_private))->ipp_npps--;
3361 3355 }
3362 3356 ilport->ilport_tg = NULL;
3363 3357 STMF_EVENT_FREE_HANDLE(ilport->ilport_event_hdl);
3364 3358 } else {
3365 3359 mutex_exit(&stmf_state.stmf_lock);
3366 3360 return (STMF_BUSY);
|
↓ open down ↓ |
157 lines elided |
↑ open up ↑ |
3367 3361 }
3368 3362 if (ilport->ilport_kstat_info) {
3369 3363 kmem_free(ilport->ilport_kstat_info->ks_data,
3370 3364 ilport->ilport_kstat_info->ks_data_size);
3371 3365 kstat_delete(ilport->ilport_kstat_info);
3372 3366 }
3373 3367 if (ilport->ilport_kstat_io) {
3374 3368 kstat_delete(ilport->ilport_kstat_io);
3375 3369 mutex_destroy(&ilport->ilport_kstat_lock);
3376 3370 }
3377 - stmf_delete_itl_kstat_by_lport(ilport->ilport_kstat_tgt_name);
3378 3371 mutex_exit(&stmf_state.stmf_lock);
3379 3372 return (STMF_SUCCESS);
3380 3373 }
3381 3374
3382 3375 /*
3383 3376 * Rport id/instance mappings remain valid until STMF is unloaded
3384 3377 */
3385 3378 static int
3386 3379 stmf_irport_compare(const void *void_irport1, const void *void_irport2)
3387 3380 {
3388 3381 const stmf_i_remote_port_t *irport1 = void_irport1;
3389 3382 const stmf_i_remote_port_t *irport2 = void_irport2;
3390 3383 int result;
3391 3384
3392 3385 /* Sort by code set then ident */
3393 3386 if (irport1->irport_id->code_set <
3394 3387 irport2->irport_id->code_set) {
3395 3388 return (-1);
3396 3389 } else if (irport1->irport_id->code_set >
3397 3390 irport2->irport_id->code_set) {
3398 3391 return (1);
3399 3392 }
3400 3393
3401 3394 /* Next by ident length */
3402 3395 if (irport1->irport_id->ident_length <
3403 3396 irport2->irport_id->ident_length) {
3404 3397 return (-1);
3405 3398 } else if (irport1->irport_id->ident_length >
3406 3399 irport2->irport_id->ident_length) {
3407 3400 return (1);
3408 3401 }
3409 3402
3410 3403 /* Code set and ident length both match, now compare idents */
3411 3404 result = memcmp(irport1->irport_id->ident,
3412 3405 irport2->irport_id->ident,
3413 3406 irport1->irport_id->ident_length);
3414 3407
3415 3408 if (result < 0) {
3416 3409 return (-1);
3417 3410 } else if (result > 0) {
3418 3411 return (1);
3419 3412 }
3420 3413
3421 3414 return (0);
3422 3415 }
3423 3416
3424 3417 static stmf_i_remote_port_t *
3425 3418 stmf_irport_create(scsi_devid_desc_t *rport_devid)
3426 3419 {
3427 3420 int alloc_len;
3428 3421 stmf_i_remote_port_t *irport;
3429 3422
3430 3423 /*
3431 3424 * Lookup will bump the refcnt if there's an existing rport
3432 3425 * context for this identifier.
3433 3426 */
3434 3427 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3435 3428
3436 3429 alloc_len = sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3437 3430 rport_devid->ident_length - 1;
3438 3431 irport = kmem_zalloc(alloc_len, KM_NOSLEEP);
3439 3432 if (irport == NULL) {
3440 3433 return (NULL);
3441 3434 }
3442 3435
3443 3436 irport->irport_instance =
3444 3437 id_alloc_nosleep(stmf_state.stmf_irport_inst_space);
3445 3438 if (irport->irport_instance == -1) {
3446 3439 kmem_free(irport, alloc_len);
3447 3440 return (NULL);
3448 3441 }
3449 3442
3450 3443 irport->irport_id =
3451 3444 (struct scsi_devid_desc *)(irport + 1); /* Ptr. Arith. */
3452 3445 bcopy(rport_devid, irport->irport_id,
3453 3446 sizeof (scsi_devid_desc_t) + rport_devid->ident_length - 1);
3454 3447 irport->irport_refcnt = 1;
3455 3448 mutex_init(&irport->irport_mutex, NULL, MUTEX_DEFAULT, NULL);
3456 3449
3457 3450 return (irport);
3458 3451 }
3459 3452
3460 3453 static void
3461 3454 stmf_irport_destroy(stmf_i_remote_port_t *irport)
3462 3455 {
3463 3456 id_free(stmf_state.stmf_irport_inst_space, irport->irport_instance);
3464 3457 mutex_destroy(&irport->irport_mutex);
3465 3458 kmem_free(irport, sizeof (*irport) + sizeof (scsi_devid_desc_t) +
3466 3459 irport->irport_id->ident_length - 1);
3467 3460 }
3468 3461
3469 3462 static stmf_i_remote_port_t *
3470 3463 stmf_irport_register(scsi_devid_desc_t *rport_devid)
3471 3464 {
3472 3465 stmf_i_remote_port_t *irport;
3473 3466
3474 3467 mutex_enter(&stmf_state.stmf_lock);
3475 3468
3476 3469 /*
3477 3470 * Lookup will bump the refcnt if there's an existing rport
3478 3471 * context for this identifier.
3479 3472 */
3480 3473 if ((irport = stmf_irport_lookup_locked(rport_devid)) != NULL) {
3481 3474 mutex_exit(&stmf_state.stmf_lock);
3482 3475 return (irport);
3483 3476 }
3484 3477
3485 3478 irport = stmf_irport_create(rport_devid);
3486 3479 if (irport == NULL) {
3487 3480 mutex_exit(&stmf_state.stmf_lock);
3488 3481 return (NULL);
3489 3482 }
3490 3483
3491 3484 avl_add(&stmf_state.stmf_irportlist, irport);
3492 3485 mutex_exit(&stmf_state.stmf_lock);
3493 3486
3494 3487 return (irport);
3495 3488 }
3496 3489
3497 3490 static stmf_i_remote_port_t *
3498 3491 stmf_irport_lookup_locked(scsi_devid_desc_t *rport_devid)
3499 3492 {
3500 3493 stmf_i_remote_port_t *irport;
3501 3494 stmf_i_remote_port_t tmp_irport;
3502 3495
3503 3496 ASSERT(mutex_owned(&stmf_state.stmf_lock));
3504 3497 tmp_irport.irport_id = rport_devid;
3505 3498 irport = avl_find(&stmf_state.stmf_irportlist, &tmp_irport, NULL);
3506 3499 if (irport != NULL) {
3507 3500 mutex_enter(&irport->irport_mutex);
3508 3501 irport->irport_refcnt++;
3509 3502 mutex_exit(&irport->irport_mutex);
3510 3503 }
3511 3504
3512 3505 return (irport);
3513 3506 }
3514 3507
3515 3508 static void
3516 3509 stmf_irport_deregister(stmf_i_remote_port_t *irport)
3517 3510 {
3518 3511 /*
3519 3512 * If we were actually going to remove unreferenced remote ports
3520 3513 * we would want to acquire stmf_state.stmf_lock before getting
3521 3514 * the irport mutex.
3522 3515 *
3523 3516 * Instead we're just going to leave it there even if unreferenced.
3524 3517 */
3525 3518 mutex_enter(&irport->irport_mutex);
3526 3519 irport->irport_refcnt--;
3527 3520 mutex_exit(&irport->irport_mutex);
3528 3521 }
3529 3522
3530 3523 /*
3531 3524 * Port provider has to make sure that register/deregister session and
3532 3525 * port are serialized calls.
3533 3526 */
3534 3527 stmf_status_t
3535 3528 stmf_register_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3536 3529 {
3537 3530 stmf_i_scsi_session_t *iss;
3538 3531 stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3539 3532 lport->lport_stmf_private;
3540 3533 uint8_t lun[8];
3541 3534
3542 3535 /*
3543 3536 * Port state has to be online to register a scsi session. It is
3544 3537 * possible that we started an offline operation and a new SCSI
3545 3538 * session started at the same time (in that case also we are going
3546 3539 * to fail the registeration). But any other state is simply
3547 3540 * a bad port provider implementation.
3548 3541 */
3549 3542 if (ilport->ilport_state != STMF_STATE_ONLINE) {
3550 3543 if (ilport->ilport_state != STMF_STATE_OFFLINING) {
3551 3544 stmf_trace(lport->lport_alias, "Port is trying to "
3552 3545 "register a session while the state is neither "
3553 3546 "online nor offlining");
3554 3547 }
3555 3548 return (STMF_FAILURE);
3556 3549 }
3557 3550 bzero(lun, 8);
3558 3551 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
3559 3552 if ((iss->iss_irport = stmf_irport_register(ss->ss_rport_id)) == NULL) {
3560 3553 stmf_trace(lport->lport_alias, "Could not register "
3561 3554 "remote port during session registration");
3562 3555 return (STMF_FAILURE);
3563 3556 }
3564 3557
3565 3558 iss->iss_flags |= ISS_BEING_CREATED;
3566 3559
3567 3560 if (ss->ss_rport == NULL) {
3568 3561 iss->iss_flags |= ISS_NULL_TPTID;
3569 3562 ss->ss_rport = stmf_scsilib_devid_to_remote_port(
3570 3563 ss->ss_rport_id);
3571 3564 if (ss->ss_rport == NULL) {
3572 3565 iss->iss_flags &= ~(ISS_NULL_TPTID | ISS_BEING_CREATED);
3573 3566 stmf_trace(lport->lport_alias, "Device id to "
3574 3567 "remote port conversion failed");
3575 3568 return (STMF_FAILURE);
3576 3569 }
3577 3570 } else {
3578 3571 if (!stmf_scsilib_tptid_validate(ss->ss_rport->rport_tptid,
3579 3572 ss->ss_rport->rport_tptid_sz, NULL)) {
3580 3573 iss->iss_flags &= ~ISS_BEING_CREATED;
3581 3574 stmf_trace(lport->lport_alias, "Remote port "
3582 3575 "transport id validation failed");
3583 3576 return (STMF_FAILURE);
3584 3577 }
3585 3578 }
3586 3579
3587 3580 /* sessions use the ilport_lock. No separate lock is required */
3588 3581 iss->iss_lockp = &ilport->ilport_lock;
3589 3582
3590 3583 if (iss->iss_sm != NULL)
3591 3584 cmn_err(CE_PANIC, "create lun map called with non NULL map");
3592 3585 iss->iss_sm = (stmf_lun_map_t *)kmem_zalloc(sizeof (stmf_lun_map_t),
3593 3586 KM_SLEEP);
3594 3587
3595 3588 mutex_enter(&stmf_state.stmf_lock);
3596 3589 rw_enter(&ilport->ilport_lock, RW_WRITER);
3597 3590 (void) stmf_session_create_lun_map(ilport, iss);
3598 3591 ilport->ilport_nsessions++;
3599 3592 iss->iss_next = ilport->ilport_ss_list;
3600 3593 ilport->ilport_ss_list = iss;
3601 3594 rw_exit(&ilport->ilport_lock);
3602 3595 mutex_exit(&stmf_state.stmf_lock);
3603 3596
3604 3597 iss->iss_creation_time = ddi_get_time();
3605 3598 ss->ss_session_id = atomic_add_64_nv(&stmf_session_counter, 1);
3606 3599 iss->iss_flags &= ~ISS_BEING_CREATED;
3607 3600 /* XXX should we remove ISS_LUN_INVENTORY_CHANGED on new session? */
3608 3601 iss->iss_flags &= ~ISS_LUN_INVENTORY_CHANGED;
3609 3602 DTRACE_PROBE2(session__online, stmf_local_port_t *, lport,
3610 3603 stmf_scsi_session_t *, ss);
3611 3604 return (STMF_SUCCESS);
3612 3605 }
3613 3606
/*
 * stmf_deregister_scsi_session -- tear down a session created by
 * stmf_register_scsi_session: clear LUN-inventory flags, wait out any
 * in-flight session event, notify the ALUA peer (active non-standby ALUA
 * ports only), unlink the session from the port's list, drop the remote
 * port reference and per-session LUN map, and free an stmf-allocated
 * remote port (ISS_NULL_TPTID).  Panics if the session is not on the
 * port's list.
 */
3614 3607 void
3615 3608 stmf_deregister_scsi_session(stmf_local_port_t *lport, stmf_scsi_session_t *ss)
3616 3609 {
3617 3610 	stmf_i_local_port_t *ilport = (stmf_i_local_port_t *)
3618 3611 	    lport->lport_stmf_private;
3619 3612 	stmf_i_scsi_session_t *iss, **ppss;
3620 3613 	int found = 0;
3621 3614 	stmf_ic_msg_t *ic_session_dereg;
3622 3615 	stmf_status_t ic_ret = STMF_FAILURE;
3623 3616 
3624 3617 	DTRACE_PROBE2(session__offline, stmf_local_port_t *, lport,
3625 3618 	    stmf_scsi_session_t *, ss);
3626 3619 
3627 3620 	iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
	/*
	 * NOTE(review): the alias pointer is cleared without a free here --
	 * presumably the port provider owns the string; confirm with callers.
	 */
3628 3621 	if (ss->ss_rport_alias) {
3629 3622 		ss->ss_rport_alias = NULL;
3630 3623 	}
3631 3624 
3632 3625 try_dereg_ss_again:
3633 3626 	mutex_enter(&stmf_state.stmf_lock);
3634 3627 	atomic_and_32(&iss->iss_flags,
3635 3628 	    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
	/* Busy-wait (one tick per retry) until no session event is active. */
3636 3629 	if (iss->iss_flags & ISS_EVENT_ACTIVE) {
3637 3630 		mutex_exit(&stmf_state.stmf_lock);
3638 3631 		delay(1);
3639 3632 		goto try_dereg_ss_again;
3640 3633 	}
3641 3634 
3642 3635 	/* dereg proxy session if not standby port */
3643 3636 	if (stmf_state.stmf_alua_state == 1 &&
3644 3637 	    ilport->ilport_standby == 0 &&
3645 3638 	    ilport->ilport_alua == 1) {
3646 3639 		ic_session_dereg = ic_session_dereg_msg_alloc(
3647 3640 		    ss, stmf_proxy_msg_id);
3648 3641 		if (ic_session_dereg) {
3649 3642 			ic_ret = ic_tx_msg(ic_session_dereg);
3650 3643 			if (ic_ret == STMF_IC_MSG_SUCCESS) {
3651 3644 				stmf_proxy_msg_id++;
3652 3645 			}
3653 3646 		}
3654 3647 	}
3655 3648 
	/* Unlink the session from the port's singly-linked session list. */
3656 3649 	rw_enter(&ilport->ilport_lock, RW_WRITER);
3657 3650 	for (ppss = &ilport->ilport_ss_list; *ppss != NULL;
3658 3651 	    ppss = &((*ppss)->iss_next)) {
3659 3652 		if (iss == (*ppss)) {
3660 3653 			*ppss = (*ppss)->iss_next;
3661 3654 			found = 1;
3662 3655 			break;
3663 3656 		}
3664 3657 	}
3665 3658 	if (!found) {
3666 3659 		cmn_err(CE_PANIC, "Deregister session called for non existent"
3667 3660 		    " session");
3668 3661 	}
3669 3662 	ilport->ilport_nsessions--;
3670 3663 
3671 3664 	stmf_irport_deregister(iss->iss_irport);
3672 3665 	(void) stmf_session_destroy_lun_map(ilport, iss);
3673 3666 	rw_exit(&ilport->ilport_lock);
3674 3667 	mutex_exit(&stmf_state.stmf_lock);
3675 3668 
	/* Free the rport only if stmf itself built it during registration. */
3676 3669 	if (iss->iss_flags & ISS_NULL_TPTID) {
3677 3670 		stmf_remote_port_free(ss->ss_rport);
3678 3671 	}
3679 3672 }
3680 3673
/*
 * stmf_session_id_to_issptr -- linear scan of every session on every local
 * port for a matching ss_session_id.  Returns the internal session pointer
 * or NULL.  When stay_locked is nonzero the matching port's ilport_lock is
 * left held (as RW_WRITER) for the caller to release; stmf_lock is always
 * dropped before return.
 */
3681 3674 stmf_i_scsi_session_t *
3682 3675 stmf_session_id_to_issptr(uint64_t session_id, int stay_locked)
3683 3676 {
3684 3677 	stmf_i_local_port_t *ilport;
3685 3678 	stmf_i_scsi_session_t *iss;
3686 3679 
3687 3680 	mutex_enter(&stmf_state.stmf_lock);
3688 3681 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
3689 3682 	    ilport = ilport->ilport_next) {
3690 3683 		rw_enter(&ilport->ilport_lock, RW_WRITER);
3691 3684 		for (iss = ilport->ilport_ss_list; iss != NULL;
3692 3685 		    iss = iss->iss_next) {
3693 3686 			if (iss->iss_ss->ss_session_id == session_id) {
3694 3687 				if (!stay_locked)
3695 3688 					rw_exit(&ilport->ilport_lock);
     |
 ↓ open down ↓ |
 308 lines elided |
 ↑ open up ↑ |
3696 3689 				mutex_exit(&stmf_state.stmf_lock);
3697 3690 				return (iss);
3698 3691 			}
3699 3692 		}
3700 3693 		rw_exit(&ilport->ilport_lock);
3701 3694 	}
3702 3695 	mutex_exit(&stmf_state.stmf_lock);
3703 3696 	return (NULL);
3704 3697 }
3705 3698
3706 -#define MAX_ALIAS 128
3707 -
3708 -static int
3709 -stmf_itl_kstat_compare(const void *itl_kstat_1, const void *itl_kstat_2)
3710 -{
3711 - const stmf_i_itl_kstat_t *kstat_nm1 = itl_kstat_1;
3712 - const stmf_i_itl_kstat_t *kstat_nm2 = itl_kstat_2;
3713 - int ret;
3714 -
3715 - ret = strcmp(kstat_nm1->iitl_kstat_nm, kstat_nm2->iitl_kstat_nm);
3716 - if (ret < 0) {
3717 - return (-1);
3718 - } else if (ret > 0) {
3719 - return (1);
3720 - }
3721 - return (0);
3722 -}
3723 -
3724 -static stmf_i_itl_kstat_t *
3725 -stmf_itl_kstat_lookup(char *kstat_nm)
3726 -{
3727 - stmf_i_itl_kstat_t tmp;
3728 - stmf_i_itl_kstat_t *itl_kstat;
3729 -
3730 - ASSERT(mutex_owned(&stmf_state.stmf_lock));
3731 - (void) strcpy(tmp.iitl_kstat_nm, kstat_nm);
3732 - itl_kstat = avl_find(&stmf_state.stmf_itl_kstat_list, &tmp, NULL);
3733 - return (itl_kstat);
3734 -}
3735 -
3736 -static void
3737 -stmf_delete_itl_kstat_by_lport(char *tgt)
3738 -{
3739 - stmf_i_itl_kstat_t *ks_itl, *next;
3740 -
3741 - ASSERT(mutex_owned(&stmf_state.stmf_lock));
3742 - ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3743 - for (; ks_itl != NULL; ks_itl = next) {
3744 - next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3745 - if (strcmp(ks_itl->iitl_kstat_lport, tgt) == 0) {
3746 - stmf_teardown_itl_kstats(ks_itl);
3747 - avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3748 - kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3749 - }
3750 - }
3751 -}
3752 -
3753 -static void
3754 -stmf_delete_itl_kstat_by_guid(char *guid)
3755 -{
3756 - stmf_i_itl_kstat_t *ks_itl, *next;
3757 -
3758 - ASSERT(mutex_owned(&stmf_state.stmf_lock));
3759 - ks_itl = avl_first(&stmf_state.stmf_itl_kstat_list);
3760 - for (; ks_itl != NULL; ks_itl = next) {
3761 - next = AVL_NEXT(&stmf_state.stmf_itl_kstat_list, ks_itl);
3762 - if (strcmp(ks_itl->iitl_kstat_guid, guid) == 0) {
3763 - stmf_teardown_itl_kstats(ks_itl);
3764 - avl_remove(&stmf_state.stmf_itl_kstat_list, ks_itl);
3765 - kmem_free(ks_itl, sizeof (stmf_i_itl_kstat_t));
3766 - }
3767 - }
3768 -}
3769 -
3770 -static stmf_i_itl_kstat_t *
3771 -stmf_itl_kstat_create(stmf_itl_data_t *itl, char *nm,
3772 - scsi_devid_desc_t *lport, scsi_devid_desc_t *lun)
3773 -{
3774 - stmf_i_itl_kstat_t *ks_itl;
3775 - int i, len;
3776 -
3777 - ASSERT(mutex_owned(&stmf_state.stmf_lock));
3778 - if ((ks_itl = stmf_itl_kstat_lookup(nm)) != NULL)
3779 - return (ks_itl);
3780 -
3781 - len = sizeof (stmf_i_itl_kstat_t);
3782 - ks_itl = kmem_zalloc(len, KM_NOSLEEP);
3783 - if (ks_itl == NULL)
3784 - return (NULL);
3785 -
3786 - (void) strcpy(ks_itl->iitl_kstat_nm, nm);
3787 - bcopy(lport->ident, ks_itl->iitl_kstat_lport, lport->ident_length);
3788 - ks_itl->iitl_kstat_lport[lport->ident_length] = '\0';
3789 - for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
3790 - (void) sprintf(&ks_itl->iitl_kstat_guid[i * 2], "%02x",
3791 - lun->ident[i]);
3792 - }
3793 - ks_itl->iitl_kstat_strbuf = itl->itl_kstat_strbuf;
3794 - ks_itl->iitl_kstat_strbuflen = itl->itl_kstat_strbuflen;
3795 - ks_itl->iitl_kstat_info = itl->itl_kstat_info;
3796 - ks_itl->iitl_kstat_taskq = itl->itl_kstat_taskq;
3797 - ks_itl->iitl_kstat_lu_xfer = itl->itl_kstat_lu_xfer;
3798 - ks_itl->iitl_kstat_lport_xfer = itl->itl_kstat_lport_xfer;
3799 - avl_add(&stmf_state.stmf_itl_kstat_list, ks_itl);
3800 -
3801 - return (ks_itl);
3802 -}
3803 -
3804 -stmf_status_t
3805 -stmf_setup_itl_kstats(stmf_itl_data_t *itl)
3806 -{
3807 - char ks_itl_id[32];
3808 - char ks_nm[KSTAT_STRLEN];
3809 - char ks_itl_nm[KSTAT_STRLEN];
3810 - stmf_kstat_itl_info_t *ks_itl;
3811 - stmf_scsi_session_t *ss;
3812 - stmf_i_scsi_session_t *iss;
3813 - stmf_i_local_port_t *ilport;
3814 - char *strbuf;
3815 - int id, len, i;
3816 - char *rport_alias;
3817 - char *lport_alias;
3818 - char *lu_alias;
3819 - stmf_i_itl_kstat_t *tmp_kstat;
3820 -
3821 - /*
3822 - * Allocate enough memory in the ITL to hold the relevant
3823 - * identifiers.
3824 - * rport and lport identifiers come from the stmf_scsi_session_t.
3825 - * ident might not be null terminated.
3826 - */
3827 - ss = itl->itl_session->iss_ss;
3828 - iss = ss->ss_stmf_private;
3829 - ilport = ss->ss_lport->lport_stmf_private;
3830 - (void) snprintf(ks_itl_id, 32, "%d.%d.%d",
3831 - iss->iss_irport->irport_instance, ilport->ilport_instance,
3832 - itl->itl_lun);
3833 -
3834 - (void) snprintf(ks_itl_nm, KSTAT_STRLEN, "itl_%s", ks_itl_id);
3835 - /*
3836 - * let's verify this itl_kstat already exist
3837 - */
3838 - if ((tmp_kstat = stmf_itl_kstat_lookup(ks_itl_nm)) != NULL) {
3839 - itl->itl_kstat_strbuf = tmp_kstat->iitl_kstat_strbuf;
3840 - itl->itl_kstat_strbuflen = tmp_kstat->iitl_kstat_strbuflen;
3841 - itl->itl_kstat_info = tmp_kstat->iitl_kstat_info;
3842 - itl->itl_kstat_taskq = tmp_kstat->iitl_kstat_taskq;
3843 - itl->itl_kstat_lu_xfer = tmp_kstat->iitl_kstat_lu_xfer;
3844 - itl->itl_kstat_lport_xfer = tmp_kstat->iitl_kstat_lport_xfer;
3845 - return (STMF_SUCCESS);
3846 - }
3847 -
3848 - /* New itl_kstat */
3849 - rport_alias = (ss->ss_rport_alias == NULL) ?
3850 - "" : ss->ss_rport_alias;
3851 - lport_alias = (ss->ss_lport->lport_alias == NULL) ?
3852 - "" : ss->ss_lport->lport_alias;
3853 - lu_alias = (itl->itl_ilu->ilu_lu->lu_alias == NULL) ?
3854 - "" : itl->itl_ilu->ilu_lu->lu_alias;
3855 -
3856 - itl->itl_kstat_strbuflen = (ss->ss_rport_id->ident_length + 1) +
3857 - (strnlen(rport_alias, MAX_ALIAS) + 1) +
3858 - (ss->ss_lport->lport_id->ident_length + 1) +
3859 - (strnlen(lport_alias, MAX_ALIAS) + 1) +
3860 - (STMF_GUID_INPUT + 1) +
3861 - (strnlen(lu_alias, MAX_ALIAS) + 1) +
3862 - MAX_PROTO_STR_LEN;
3863 - itl->itl_kstat_strbuf = kmem_zalloc(itl->itl_kstat_strbuflen,
3864 - KM_NOSLEEP);
3865 - if (itl->itl_kstat_strbuf == NULL) {
3866 - return (STMF_ALLOC_FAILURE);
3867 - }
3868 -
3869 - ks_itl = (stmf_kstat_itl_info_t *)kmem_zalloc(sizeof (*ks_itl),
3870 - KM_NOSLEEP);
3871 - if (ks_itl == NULL) {
3872 - kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
3873 - return (STMF_ALLOC_FAILURE);
3874 - }
3875 -
3876 - if ((itl->itl_kstat_info = kstat_create(STMF_MODULE_NAME,
3877 - 0, ks_itl_nm, "misc", KSTAT_TYPE_NAMED,
3878 - sizeof (stmf_kstat_itl_info_t) / sizeof (kstat_named_t),
3879 - KSTAT_FLAG_VIRTUAL)) == NULL) {
3880 - goto itl_kstat_cleanup;
3881 - }
3882 -
3883 - itl->itl_kstat_info->ks_data_size += itl->itl_kstat_strbuflen;
3884 - itl->itl_kstat_info->ks_data = ks_itl;
3885 -
3886 - kstat_named_init(&ks_itl->i_rport_name, "rport-name",
3887 - KSTAT_DATA_STRING);
3888 - kstat_named_init(&ks_itl->i_rport_alias, "rport-alias",
3889 - KSTAT_DATA_STRING);
3890 - kstat_named_init(&ks_itl->i_lport_name, "lport-name",
3891 - KSTAT_DATA_STRING);
3892 - kstat_named_init(&ks_itl->i_lport_alias, "lport-alias",
3893 - KSTAT_DATA_STRING);
3894 - kstat_named_init(&ks_itl->i_protocol, "protocol",
3895 - KSTAT_DATA_STRING);
3896 - kstat_named_init(&ks_itl->i_lu_guid, "lu-guid",
3897 - KSTAT_DATA_STRING);
3898 - kstat_named_init(&ks_itl->i_lu_alias, "lu-alias",
3899 - KSTAT_DATA_STRING);
3900 - kstat_named_init(&ks_itl->i_lu_number, "lu-number",
3901 - KSTAT_DATA_UINT64);
3902 - kstat_named_init(&ks_itl->i_task_waitq_elapsed, "task-waitq-elapsed",
3903 - KSTAT_DATA_UINT64);
3904 - kstat_named_init(&ks_itl->i_task_read_elapsed, "task-read-elapsed",
3905 - KSTAT_DATA_UINT64);
3906 - kstat_named_init(&ks_itl->i_task_write_elapsed, "task-write-elapsed",
3907 - KSTAT_DATA_UINT64);
3908 - kstat_named_init(&ks_itl->i_lu_read_elapsed, "lu-read-elapsed",
3909 - KSTAT_DATA_UINT64);
3910 - kstat_named_init(&ks_itl->i_lu_write_elapsed, "lu-write-elapsed",
3911 - KSTAT_DATA_UINT64);
3912 - kstat_named_init(&ks_itl->i_lport_read_elapsed, "lport-read-elapsed",
3913 - KSTAT_DATA_UINT64);
3914 - kstat_named_init(&ks_itl->i_lport_write_elapsed, "lport-write-elapsed",
3915 - KSTAT_DATA_UINT64);
3916 -
3917 - strbuf = itl->itl_kstat_strbuf;
3918 -
3919 - /* Rport */
3920 - len = ss->ss_rport_id->ident_length;
3921 - bcopy(ss->ss_rport_id->ident, strbuf, len);
3922 - strbuf += len;
3923 - *strbuf = '\0';
3924 - kstat_named_setstr(&ks_itl->i_rport_name, strbuf - len);
3925 - strbuf++;
3926 -
3927 - len = strnlen(rport_alias, MAX_ALIAS);
3928 - (void) strncpy(strbuf, rport_alias, len + 1);
3929 - kstat_named_setstr(&ks_itl->i_rport_alias, strbuf);
3930 - strbuf += len + 1;
3931 -
3932 - /* Lport */
3933 - len = ss->ss_lport->lport_id->ident_length;
3934 - bcopy(ss->ss_lport->lport_id->ident, strbuf, len);
3935 - strbuf += len;
3936 - *strbuf = '\0';
3937 - kstat_named_setstr(&ks_itl->i_lport_name, strbuf - len);
3938 - strbuf++;
3939 -
3940 - len = strnlen(lport_alias, MAX_ALIAS);
3941 - (void) strncpy(strbuf, lport_alias, len + 1);
3942 - kstat_named_setstr(&ks_itl->i_lport_alias, strbuf);
3943 - strbuf += len + 1;
3944 -
3945 - id = (ss->ss_lport->lport_id->protocol_id > PROTOCOL_ANY) ?
3946 - PROTOCOL_ANY : ss->ss_lport->lport_id->protocol_id;
3947 - kstat_named_setstr(&ks_itl->i_protocol, protocol_ident[id]);
3948 -
3949 - /* LU */
3950 - for (i = 0; i < STMF_GUID_INPUT / 2; i++) {
3951 - (void) sprintf(&strbuf[i * 2], "%02x",
3952 - itl->itl_ilu->ilu_lu->lu_id->ident[i]);
3953 - }
3954 - kstat_named_setstr(&ks_itl->i_lu_guid, strbuf);
3955 - strbuf += STMF_GUID_INPUT + 1;
3956 -
3957 - len = strnlen(lu_alias, MAX_ALIAS);
3958 - (void) strncpy(strbuf, lu_alias, len + 1);
3959 - kstat_named_setstr(&ks_itl->i_lu_alias, strbuf);
3960 - strbuf += len + 1;
3961 -
3962 - ks_itl->i_lu_number.value.ui64 = itl->itl_lun;
3963 -
3964 - /* Now create the I/O kstats */
3965 - (void) snprintf(ks_nm, KSTAT_STRLEN, "itl_tasks_%s", ks_itl_id);
3966 - if ((itl->itl_kstat_taskq = kstat_create(STMF_MODULE_NAME, 0,
3967 - ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3968 - goto itl_kstat_cleanup;
3969 - }
3970 -
3971 - (void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lu_%s", ks_itl_id);
3972 - if ((itl->itl_kstat_lu_xfer = kstat_create(STMF_MODULE_NAME, 0,
3973 - ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3974 - goto itl_kstat_cleanup;
3975 - }
3976 -
3977 - (void) snprintf(ks_nm, KSTAT_STRLEN, "itl_lport_%s", ks_itl_id);
3978 - if ((itl->itl_kstat_lport_xfer = kstat_create(STMF_MODULE_NAME, 0,
3979 - ks_nm, "io", KSTAT_TYPE_IO, 1, 0)) == NULL) {
3980 - goto itl_kstat_cleanup;
3981 - }
3982 -
3983 - /* Install all the kstats */
3984 - kstat_install(itl->itl_kstat_info);
3985 - kstat_install(itl->itl_kstat_taskq);
3986 - kstat_install(itl->itl_kstat_lu_xfer);
3987 - kstat_install(itl->itl_kstat_lport_xfer);
3988 -
3989 - /* Add new itl_kstat to stmf_itl_kstat_list */
3990 - if (stmf_itl_kstat_create(itl, ks_itl_nm, ss->ss_lport->lport_id,
3991 - itl->itl_ilu->ilu_lu->lu_id) != NULL)
3992 - return (STMF_SUCCESS);
3993 -
3994 -itl_kstat_cleanup:
3995 - if (itl->itl_kstat_taskq)
3996 - kstat_delete(itl->itl_kstat_taskq);
3997 - if (itl->itl_kstat_lu_xfer)
3998 - kstat_delete(itl->itl_kstat_lu_xfer);
3999 - if (itl->itl_kstat_lport_xfer)
4000 - kstat_delete(itl->itl_kstat_lport_xfer);
4001 - if (itl->itl_kstat_info)
4002 - kstat_delete(itl->itl_kstat_info);
4003 - kmem_free(ks_itl, sizeof (*ks_itl));
4004 - kmem_free(itl->itl_kstat_strbuf, itl->itl_kstat_strbuflen);
4005 - cmn_err(CE_WARN, "STMF: kstat_create itl failed");
4006 - return (STMF_ALLOC_FAILURE);
4007 -}
4008 -
4009 -static void
4010 -stmf_teardown_itl_kstats(stmf_i_itl_kstat_t *ks)
4011 -{
4012 - kstat_delete(ks->iitl_kstat_lport_xfer);
4013 - kstat_delete(ks->iitl_kstat_lu_xfer);
4014 - kstat_delete(ks->iitl_kstat_taskq);
4015 - kmem_free(ks->iitl_kstat_info->ks_data, sizeof (stmf_kstat_itl_info_t));
4016 - kstat_delete(ks->iitl_kstat_info);
4017 - kmem_free(ks->iitl_kstat_strbuf, ks->iitl_kstat_strbuflen);
4018 -}
4019 -
/*
 * stmf_release_itl_handle -- final teardown of an ITL nexus whose refcount
 * has dropped to zero (caller must already have set
 * STMF_ITL_BEING_TERMINATED).  Unlinks the itl from its LU's list, tells
 * the LU the handle is gone via lu_abort(STMF_LU_ITL_HANDLE_REMOVED), and
 * frees the itl.
 */
4020 3699 void
4021 3700 stmf_release_itl_handle(stmf_lu_t *lu, stmf_itl_data_t *itl)
4022 3701 {
4023 3702 	stmf_itl_data_t **itlpp;
4024 3703 	stmf_i_lu_t *ilu;
4025 3704 
4026 3705 	ASSERT(itl->itl_flags & STMF_ITL_BEING_TERMINATED);
4027 3706 
4028 3707 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4029 3708 	mutex_enter(&ilu->ilu_task_lock);
	/* Find and unlink this itl on the LU's singly-linked itl list. */
4030 3709 	for (itlpp = &ilu->ilu_itl_list; (*itlpp) != NULL;
4031 3710 	    itlpp = &(*itlpp)->itl_next) {
4032 3711 		if ((*itlpp) == itl)
4033 3712 			break;
4034 3713 	}
4035 3714 	ASSERT((*itlpp) != NULL);
4036 3715 	*itlpp = itl->itl_next;
4037 3716 	mutex_exit(&ilu->ilu_task_lock);
4038 3717 	lu->lu_abort(lu, STMF_LU_ITL_HANDLE_REMOVED, itl->itl_handle,
4039 3718 	    (uint32_t)itl->itl_hdlrm_reason);
4040 3719 
4041 3720 	kmem_free(itl, sizeof (*itl));
4042 3721 }
4043 3722
/*
 * stmf_register_itl_handle -- attach an LU-provider handle to the
 * (initiator, target, LUN) nexus described by lun/ss (or session_id when
 * ss is NULL).  Allocates a refcounted stmf_itl_data_t (itl_counter = 1),
 * links it on the LU's itl list and into the session's LUN map entry.
 * Returns STMF_NOT_FOUND if the nexus can't be resolved, STMF_ALREADY if
 * a handle is already registered, STMF_ALLOC_FAILURE on kmem failure.
 * (The removed "-" lines are the per-ITL kstat setup dropped by this
 * changeset.)
 */
4044 3723 stmf_status_t
4045 3724 stmf_register_itl_handle(stmf_lu_t *lu, uint8_t *lun,
4046 3725     stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
4047 3726 {
4048 3727 	stmf_itl_data_t *itl;
4049 3728 	stmf_i_scsi_session_t *iss;
4050 3729 	stmf_lun_map_ent_t *lun_map_ent;
4051 3730 	stmf_i_lu_t *ilu;
4052 3731 	uint16_t n;
     |
 ↓ open down ↓ |
 23 lines elided |
 ↑ open up ↑ |
4053 3732 
4054 3733 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4055 3734 	if (ss == NULL) {
		/* Lookup with stay_locked=1: iss_lockp stays held on success. */
4056 3735 		iss = stmf_session_id_to_issptr(session_id, 1);
4057 3736 		if (iss == NULL)
4058 3737 			return (STMF_NOT_FOUND);
4059 3738 	} else {
4060 3739 		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4061 3740 	}
4062 3741 
4063 -	/*
4064 -	 * Acquire stmf_lock for stmf_itl_kstat_lookup.
4065 -	 */
4066 3742 	mutex_enter(&stmf_state.stmf_lock);
4067 3743 	rw_enter(iss->iss_lockp, RW_WRITER);
	/* Decode a 2-byte SAM LUN (flat addressing, 14 significant bits). */
4068 3744 	n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4069 3745 	lun_map_ent = (stmf_lun_map_ent_t *)
4070 3746 	    stmf_get_ent_from_map(iss->iss_sm, n);
4071 3747 	if ((lun_map_ent == NULL) || (lun_map_ent->ent_lu != lu)) {
4072 3748 		rw_exit(iss->iss_lockp);
4073 3749 		mutex_exit(&stmf_state.stmf_lock);
4074 3750 		return (STMF_NOT_FOUND);
4075 3751 	}
4076 3752 	if (lun_map_ent->ent_itl_datap != NULL) {
4077 3753 		rw_exit(iss->iss_lockp);
4078 3754 		mutex_exit(&stmf_state.stmf_lock);
4079 3755 		return (STMF_ALREADY);
4080 3756 	}
4081 3757 
4082 3758 	itl = (stmf_itl_data_t *)kmem_zalloc(sizeof (*itl), KM_NOSLEEP);
4083 3759 	if (itl == NULL) {
4084 3760 		rw_exit(iss->iss_lockp);
     |
 ↓ open down ↓ |
 9 lines elided |
 ↑ open up ↑ |
4085 3761 		mutex_exit(&stmf_state.stmf_lock);
4086 3762 		return (STMF_ALLOC_FAILURE);
4087 3763 	}
4088 3764 
4089 3765 	itl->itl_ilu = ilu;
4090 3766 	itl->itl_session = iss;
4091 3767 	itl->itl_counter = 1;
4092 3768 	itl->itl_lun = n;
4093 3769 	itl->itl_handle = itl_handle;
4094 3770 
4095 -	if (stmf_setup_itl_kstats(itl) != STMF_SUCCESS) {
4096 -		kmem_free(itl, sizeof (*itl));
4097 -		rw_exit(iss->iss_lockp);
4098 -		mutex_exit(&stmf_state.stmf_lock);
4099 -		return (STMF_ALLOC_FAILURE);
4100 -	}
4101 -
4102 3771 	mutex_enter(&ilu->ilu_task_lock);
4103 3772 	itl->itl_next = ilu->ilu_itl_list;
4104 3773 	ilu->ilu_itl_list = itl;
4105 3774 	mutex_exit(&ilu->ilu_task_lock);
4106 3775 	lun_map_ent->ent_itl_datap = itl;
4107 3776 	rw_exit(iss->iss_lockp);
4108 3777 	mutex_exit(&stmf_state.stmf_lock);
4109 3778 
4110 3779 	return (STMF_SUCCESS);
4111 3780 }
4112 3781
/*
 * stmf_do_itl_dereg -- begin termination of an ITL nexus.  Atomically sets
 * STMF_ITL_BEING_TERMINATED (no-op if already set), records the removal
 * reason, then drops one reference; the last reference triggers
 * stmf_release_itl_handle.  (The removed "-" lines were a racy
 * drv_usecwait/recheck dance dropped by this changeset.)
 */
4113 3782 void
4114 3783 stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl, uint8_t hdlrm_reason)
4115 3784 {
4116 3785 	uint8_t old, new;
4117 3786 
	/* CAS loop: only the winner proceeds past this point. */
4118 3787 	do {
4119 3788 		old = new = itl->itl_flags;
4120 3789 		if (old & STMF_ITL_BEING_TERMINATED)
     |
 ↓ open down ↓ |
 9 lines elided |
 ↑ open up ↑ |
4121 3790 			return;
4122 3791 		new |= STMF_ITL_BEING_TERMINATED;
4123 3792 	} while (atomic_cas_8(&itl->itl_flags, old, new) != old);
4124 3793 	itl->itl_hdlrm_reason = hdlrm_reason;
4125 3794 
4126 3795 	ASSERT(itl->itl_counter);
4127 3796 
	/* Drop our reference; nonzero result means others still hold it. */
4128 3797 	if (atomic_add_32_nv(&itl->itl_counter, -1))
4129 3798 		return;
4130 3799 
4131 -	drv_usecwait(10);
4132 -	if (itl->itl_counter)
4133 -		return;
4134 -
4135 3800 	stmf_release_itl_handle(lu, itl);
4136 3801 }
4137 3802
/*
 * stmf_deregister_all_lu_itl_handles -- detach every registered ITL handle
 * for the given LU across all ports/sessions.  Snapshots ilu_ref_cnt,
 * allocates a collection array, and restarts from scratch if the count
 * changed before stmf_lock was taken.  Detached itls are dereferenced
 * outside the locks via stmf_do_itl_dereg.
 */
4138 3803 stmf_status_t
4139 3804 stmf_deregister_all_lu_itl_handles(stmf_lu_t *lu)
4140 3805 {
4141 3806 	stmf_i_lu_t *ilu;
4142 3807 	stmf_i_local_port_t *ilport;
4143 3808 	stmf_i_scsi_session_t *iss;
4144 3809 	stmf_lun_map_t *lm;
4145 3810 	stmf_lun_map_ent_t *ent;
4146 3811 	uint32_t nmaps, nu;
4147 3812 	stmf_itl_data_t **itl_list;
4148 3813 	int i;
4149 3814 
4150 3815 	ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4151 3816 
4152 3817 dereg_itl_start:;
	/* Unlocked snapshot; validated again under stmf_lock below. */
4153 3818 	nmaps = ilu->ilu_ref_cnt;
4154 3819 	if (nmaps == 0)
4155 3820 		return (STMF_NOT_FOUND);
4156 3821 	itl_list = (stmf_itl_data_t **)kmem_zalloc(
4157 3822 	    nmaps * sizeof (stmf_itl_data_t *), KM_SLEEP);
4158 3823 	mutex_enter(&stmf_state.stmf_lock);
4159 3824 	if (nmaps != ilu->ilu_ref_cnt) {
4160 3825 		/* Something changed, start all over */
4161 3826 		mutex_exit(&stmf_state.stmf_lock);
4162 3827 		kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
4163 3828 		goto dereg_itl_start;
4164 3829 	}
4165 3830 	nu = 0;
	/* Walk every port -> session -> LUN-map entry looking for this LU. */
4166 3831 	for (ilport = stmf_state.stmf_ilportlist; ilport != NULL;
4167 3832 	    ilport = ilport->ilport_next) {
4168 3833 		rw_enter(&ilport->ilport_lock, RW_WRITER);
4169 3834 		for (iss = ilport->ilport_ss_list; iss != NULL;
4170 3835 		    iss = iss->iss_next) {
4171 3836 			lm = iss->iss_sm;
4172 3837 			if (!lm)
4173 3838 				continue;
4174 3839 			for (i = 0; i < lm->lm_nentries; i++) {
4175 3840 				if (lm->lm_plus[i] == NULL)
4176 3841 					continue;
4177 3842 				ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4178 3843 				if ((ent->ent_lu == lu) &&
4179 3844 				    (ent->ent_itl_datap)) {
4180 3845 					itl_list[nu++] = ent->ent_itl_datap;
4181 3846 					ent->ent_itl_datap = NULL;
4182 3847 					if (nu == nmaps) {
4183 3848 						rw_exit(&ilport->ilport_lock);
4184 3849 						goto dai_scan_done;
4185 3850 					}
4186 3851 				}
4187 3852 			} /* lun table for a session */
4188 3853 		} /* sessions */
4189 3854 		rw_exit(&ilport->ilport_lock);
4190 3855 	} /* ports */
4191 3856 
4192 3857 dai_scan_done:
4193 3858 	mutex_exit(&stmf_state.stmf_lock);
4194 3859 
     |
 ↓ open down ↓ |
 50 lines elided |
 ↑ open up ↑ |
	/* Dereg collected itls with no locks held. */
4195 3860 	for (i = 0; i < nu; i++) {
4196 3861 		stmf_do_itl_dereg(lu, itl_list[i],
4197 3862 		    STMF_ITL_REASON_DEREG_REQUEST);
4198 3863 	}
4199 3864 	kmem_free(itl_list, nmaps * sizeof (stmf_itl_data_t *));
4200 3865 
4201 3866 	return (STMF_SUCCESS);
4202 3867 }
4203 3868
/*
 * The "-" lines below are stmf_deregister_itl_handle, removed entirely by
 * this changeset; the surviving code is stmf_get_itl_handle.
 */
4204 3869 stmf_status_t
4205 -stmf_deregister_itl_handle(stmf_lu_t *lu, uint8_t *lun,
4206 -    stmf_scsi_session_t *ss, uint64_t session_id, void *itl_handle)
4207 -{
4208 -	stmf_i_scsi_session_t *iss;
4209 -	stmf_itl_data_t *itl;
4210 -	stmf_lun_map_ent_t *ent;
4211 -	stmf_lun_map_t *lm;
4212 -	int i;
4213 -	uint16_t n;
4214 -
4215 -	if (ss == NULL) {
4216 -		if (session_id == STMF_SESSION_ID_NONE)
4217 -			return (STMF_INVALID_ARG);
4218 -		iss = stmf_session_id_to_issptr(session_id, 1);
4219 -		if (iss == NULL)
4220 -			return (STMF_NOT_FOUND);
4221 -	} else {
4222 -		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4223 -		rw_enter(iss->iss_lockp, RW_WRITER);
4224 -	}
4225 -	lm = iss->iss_sm;
4226 -	if (lm == NULL) {
4227 -		rw_exit(iss->iss_lockp);
4228 -		return (STMF_NOT_FOUND);
4229 -	}
4230 -
4231 -	if (lun) {
4232 -		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4233 -		ent = (stmf_lun_map_ent_t *)
4234 -		    stmf_get_ent_from_map(iss->iss_sm, n);
4235 -	} else {
4236 -		if (itl_handle == NULL) {
4237 -			rw_exit(iss->iss_lockp);
4238 -			return (STMF_INVALID_ARG);
4239 -		}
4240 -		ent = NULL;
4241 -		for (i = 0; i < lm->lm_nentries; i++) {
4242 -			if (lm->lm_plus[i] == NULL)
4243 -				continue;
4244 -			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4245 -			if (ent->ent_itl_datap &&
4246 -			    (ent->ent_itl_datap->itl_handle == itl_handle)) {
4247 -				break;
4248 -			}
4249 -		}
4250 -	}
4251 -	if ((ent == NULL) || (ent->ent_lu != lu) ||
4252 -	    (ent->ent_itl_datap == NULL)) {
4253 -		rw_exit(iss->iss_lockp);
4254 -		return (STMF_NOT_FOUND);
4255 -	}
4256 -	itl = ent->ent_itl_datap;
4257 -	ent->ent_itl_datap = NULL;
4258 -	rw_exit(iss->iss_lockp);
4259 -	stmf_do_itl_dereg(lu, itl, STMF_ITL_REASON_DEREG_REQUEST);
4260 -
4261 -	return (STMF_SUCCESS);
4262 -}
4263 -
4264 -stmf_status_t
/*
 * stmf_get_itl_handle -- look up the LU-provider handle registered on the
 * (session, LUN) nexus.  With lun == NULL the session's LUN map is scanned
 * for the first entry whose ent_lu matches.  Returns STMF_SUCCESS with
 * *itl_handle_retp set, else STMF_NOT_FOUND.  The session lock is always
 * released before return.
 */
4265 3870 stmf_get_itl_handle(stmf_lu_t *lu, uint8_t *lun, stmf_scsi_session_t *ss,
4266 3871     uint64_t session_id, void **itl_handle_retp)
4267 3872 {
4268 3873 	stmf_i_scsi_session_t *iss;
4269 3874 	stmf_lun_map_ent_t *ent;
4270 3875 	stmf_lun_map_t *lm;
4271 3876 	stmf_status_t ret;
4272 3877 	int i;
4273 3878 	uint16_t n;
4274 3879 
4275 3880 	if (ss == NULL) {
		/* stay_locked=1: iss_lockp held on success, released below. */
4276 3881 		iss = stmf_session_id_to_issptr(session_id, 1);
4277 3882 		if (iss == NULL)
4278 3883 			return (STMF_NOT_FOUND);
4279 3884 	} else {
4280 3885 		iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4281 3886 		rw_enter(iss->iss_lockp, RW_WRITER);
4282 3887 	}
4283 3888 
4284 3889 	ent = NULL;
4285 3890 	if (lun == NULL) {
4286 3891 		lm = iss->iss_sm;
4287 3892 		for (i = 0; i < lm->lm_nentries; i++) {
4288 3893 			if (lm->lm_plus[i] == NULL)
4289 3894 				continue;
4290 3895 			ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
4291 3896 			if (ent->ent_lu == lu)
4292 3897 				break;
4293 3898 		}
4294 3899 	} else {
4295 3900 		n = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4296 3901 		ent = (stmf_lun_map_ent_t *)
4297 3902 		    stmf_get_ent_from_map(iss->iss_sm, n);
		/*
		 * NOTE(review): ent is dereferenced here without a NULL
		 * check; stmf_get_ent_from_map appears able to return NULL
		 * (see stmf_register_itl_handle) -- confirm callers always
		 * pass a mapped LUN.
		 */
4298 3903 		if (lu && (ent->ent_lu != lu))
4299 3904 			ent = NULL;
4300 3905 	}
4301 3906 	if (ent && ent->ent_itl_datap) {
4302 3907 		*itl_handle_retp = ent->ent_itl_datap->itl_handle;
4303 3908 		ret = STMF_SUCCESS;
4304 3909 	} else {
4305 3910 		ret = STMF_NOT_FOUND;
4306 3911 	}
4307 3912 
4308 3913 	rw_exit(iss->iss_lockp);
4309 3914 	return (ret);
4310 3915 }
4311 3916
/*
 * stmf_alloc_dbuf -- allocate a data buffer for a task through the port's
 * data store (ds_alloc_data_buf).  Finds a free slot in the task's 4-slot
 * dbuf table via the stmf_first_zero bitmap lookup, records the buffer and
 * its slot index (db_handle), and bumps task_cur_nbufs.  Returns NULL when
 * all slots are taken or the port allocation fails.
 */
4312 3917 stmf_data_buf_t *
4313 3918 stmf_alloc_dbuf(scsi_task_t *task, uint32_t size, uint32_t *pminsize,
4314 3919     uint32_t flags)
4315 3920 {
4316 3921 	stmf_i_scsi_task_t *itask =
4317 3922 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4318 3923 	stmf_local_port_t *lport = task->task_lport;
4319 3924 	stmf_data_buf_t *dbuf;
4320 3925 	uint8_t ndx;
4321 3926 
	/* 0xff sentinel from stmf_first_zero means no free slot. */
4322 3927 	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4323 3928 	if (ndx == 0xff)
4324 3929 		return (NULL);
4325 3930 	dbuf = itask->itask_dbufs[ndx] = lport->lport_ds->ds_alloc_data_buf(
4326 3931 	    task, size, pminsize, flags);
4327 3932 	if (dbuf) {
4328 3933 		task->task_cur_nbufs++;
4329 3934 		itask->itask_allocated_buf_map |= (1 << ndx);
4330 3935 		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
4331 3936 		dbuf->db_handle = ndx;
4332 3937 		return (dbuf);
4333 3938 	}
4334 3939 
4335 3940 	return (NULL);
4336 3941 }
4337 3942
/*
 * stmf_setup_dbuf -- register an LU-supplied data buffer (DB_LU_DATA_BUF)
 * with the port's data store via ds_setup_dbuf and claim a slot in the
 * task's dbuf table.  Requires TASK_AF_ACCEPT_LU_DBUF; the same conditions
 * are both ASSERTed (debug) and checked at runtime (non-debug safety).
 * Returns STMF_FAILURE if preconditions fail, no slot is free, or the port
 * setup fails; STMF_SUCCESS otherwise.
 */
4338 3943 stmf_status_t
4339 3944 stmf_setup_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t flags)
4340 3945 {
4341 3946 	stmf_i_scsi_task_t *itask =
4342 3947 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4343 3948 	stmf_local_port_t *lport = task->task_lport;
4344 3949 	uint8_t ndx;
4345 3950 	stmf_status_t ret;
4346 3951 
4347 3952 	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4348 3953 	ASSERT(lport->lport_ds->ds_setup_dbuf != NULL);
4349 3954 	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4350 3955 
4351 3956 	if ((task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF) == 0)
4352 3957 		return (STMF_FAILURE);
4353 3958 	if (lport->lport_ds->ds_setup_dbuf == NULL)
4354 3959 		return (STMF_FAILURE);
4355 3960 
	/* 0xff sentinel from stmf_first_zero means no free slot. */
4356 3961 	ndx = stmf_first_zero[itask->itask_allocated_buf_map];
4357 3962 	if (ndx == 0xff)
4358 3963 		return (STMF_FAILURE);
4359 3964 	ret = lport->lport_ds->ds_setup_dbuf(task, dbuf, flags);
4360 3965 	if (ret == STMF_FAILURE)
4361 3966 		return (STMF_FAILURE);
4362 3967 	itask->itask_dbufs[ndx] = dbuf;
4363 3968 	task->task_cur_nbufs++;
4364 3969 	itask->itask_allocated_buf_map |= (1 << ndx);
4365 3970 	dbuf->db_handle = ndx;
4366 3971 
4367 3972 	return (STMF_SUCCESS);
4368 3973 }
4369 3974
/*
 * stmf_teardown_dbuf -- inverse of stmf_setup_dbuf: release the task's
 * dbuf slot (clear the bitmap bit for db_handle, decrement
 * task_cur_nbufs) and hand the LU data buffer back to the port's data
 * store via ds_teardown_dbuf.
 */
4370 3975 void
4371 3976 stmf_teardown_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4372 3977 {
4373 3978 	stmf_i_scsi_task_t *itask =
4374 3979 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4375 3980 	stmf_local_port_t *lport = task->task_lport;
4376 3981 
4377 3982 	ASSERT(task->task_additional_flags & TASK_AF_ACCEPT_LU_DBUF);
4378 3983 	ASSERT(lport->lport_ds->ds_teardown_dbuf != NULL);
4379 3984 	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
4380 3985 
4381 3986 	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4382 3987 	task->task_cur_nbufs--;
4383 3988 	lport->lport_ds->ds_teardown_dbuf(lport->lport_ds, dbuf);
4384 3989 }
4385 3990
/*
 * stmf_free_dbuf -- inverse of stmf_alloc_dbuf: release the task's dbuf
 * slot and return the buffer to the port's data store via
 * ds_free_data_buf.
 */
4386 3991 void
4387 3992 stmf_free_dbuf(scsi_task_t *task, stmf_data_buf_t *dbuf)
4388 3993 {
4389 3994 	stmf_i_scsi_task_t *itask =
4390 3995 	    (stmf_i_scsi_task_t *)task->task_stmf_private;
4391 3996 	stmf_local_port_t *lport = task->task_lport;
4392 3997 
4393 3998 	itask->itask_allocated_buf_map &= ~(1 << dbuf->db_handle);
4394 3999 	task->task_cur_nbufs--;
4395 4000 	lport->lport_ds->ds_free_data_buf(lport->lport_ds, dbuf);
4396 4001 }
4397 4002
/*
 * stmf_handle_to_buf -- map a dbuf handle (slot index 0..3, as stored in
 * db_handle by alloc/setup) back to the task's stmf_data_buf_t, or NULL
 * for an out-of-range handle.
 */
4398 4003 stmf_data_buf_t *
4399 4004 stmf_handle_to_buf(scsi_task_t *task, uint8_t h)
4400 4005 {
4401 4006 	stmf_i_scsi_task_t *itask;
4402 4007 
4403 4008 	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4404 4009 	if (h > 3)
4405 4010 		return (NULL);
4406 4011 	return (itask->itask_dbufs[h]);
4407 4012 }
4408 4013
4409 4014 /* ARGSUSED */
4410 4015 struct scsi_task *
4411 4016 stmf_task_alloc(struct stmf_local_port *lport, stmf_scsi_session_t *ss,
4412 4017 uint8_t *lun, uint16_t cdb_length_in, uint16_t ext_id)
4413 4018 {
4414 4019 stmf_lu_t *lu;
4415 4020 stmf_i_scsi_session_t *iss;
4416 4021 stmf_i_lu_t *ilu;
4417 4022 stmf_i_scsi_task_t *itask;
4418 4023 stmf_i_scsi_task_t **ppitask;
4419 4024 scsi_task_t *task;
4420 4025 uint8_t *l;
4421 4026 stmf_lun_map_ent_t *lun_map_ent;
4422 4027 uint16_t cdb_length;
4423 4028 uint16_t luNbr;
4424 4029 uint8_t new_task = 0;
4425 4030
4426 4031 /*
4427 4032 * We allocate 7 extra bytes for CDB to provide a cdb pointer which
4428 4033 * is guaranteed to be 8 byte aligned. Some LU providers like OSD
4429 4034 * depend upon this alignment.
4430 4035 */
4431 4036 if (cdb_length_in >= 16)
4432 4037 cdb_length = cdb_length_in + 7;
4433 4038 else
4434 4039 cdb_length = 16 + 7;
4435 4040 iss = (stmf_i_scsi_session_t *)ss->ss_stmf_private;
4436 4041 luNbr = ((uint16_t)lun[1] | (((uint16_t)(lun[0] & 0x3F)) << 8));
4437 4042 rw_enter(iss->iss_lockp, RW_READER);
4438 4043 lun_map_ent =
4439 4044 (stmf_lun_map_ent_t *)stmf_get_ent_from_map(iss->iss_sm, luNbr);
|
↓ open down ↓ |
165 lines elided |
↑ open up ↑ |
4440 4045 if (!lun_map_ent) {
4441 4046 lu = dlun0;
4442 4047 } else {
4443 4048 lu = lun_map_ent->ent_lu;
4444 4049 }
4445 4050 ilu = lu->lu_stmf_private;
4446 4051 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4447 4052 rw_exit(iss->iss_lockp);
4448 4053 return (NULL);
4449 4054 }
4055 + ASSERT(lu == dlun0 || (ilu->ilu_state != STMF_STATE_OFFLINING &&
4056 + ilu->ilu_state != STMF_STATE_OFFLINE));
4450 4057 do {
4451 4058 if (ilu->ilu_free_tasks == NULL) {
4452 4059 new_task = 1;
4453 4060 break;
4454 4061 }
4455 4062 mutex_enter(&ilu->ilu_task_lock);
4456 4063 for (ppitask = &ilu->ilu_free_tasks; (*ppitask != NULL) &&
4457 4064 ((*ppitask)->itask_cdb_buf_size < cdb_length);
4458 4065 ppitask = &((*ppitask)->itask_lu_free_next))
4459 4066 ;
4460 4067 if (*ppitask) {
4461 4068 itask = *ppitask;
4462 4069 *ppitask = (*ppitask)->itask_lu_free_next;
4463 4070 ilu->ilu_ntasks_free--;
4464 4071 if (ilu->ilu_ntasks_free < ilu->ilu_ntasks_min_free)
4465 4072 ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
4466 4073 } else {
4467 4074 new_task = 1;
4468 4075 }
4469 4076 mutex_exit(&ilu->ilu_task_lock);
4470 4077 /* CONSTCOND */
4471 4078 } while (0);
4472 4079
4473 4080 if (!new_task) {
4474 4081 /*
4475 4082 * Save the task_cdb pointer and zero per cmd fields.
4476 4083 * We know the task_cdb_length is large enough by task
4477 4084 * selection process above.
4478 4085 */
4479 4086 uint8_t *save_cdb;
4480 4087 uintptr_t t_start, t_end;
4481 4088
4482 4089 task = itask->itask_task;
4483 4090 save_cdb = task->task_cdb; /* save */
4484 4091 t_start = (uintptr_t)&task->task_flags;
4485 4092 t_end = (uintptr_t)&task->task_extended_cmd;
4486 4093 bzero((void *)t_start, (size_t)(t_end - t_start));
4487 4094 task->task_cdb = save_cdb; /* restore */
4488 4095 itask->itask_ncmds = 0;
4489 4096 } else {
4490 4097 task = (scsi_task_t *)stmf_alloc(STMF_STRUCT_SCSI_TASK,
4491 4098 cdb_length, AF_FORCE_NOSLEEP);
4492 4099 if (task == NULL) {
4493 4100 rw_exit(iss->iss_lockp);
4494 4101 return (NULL);
4495 4102 }
4496 4103 task->task_lu = lu;
4497 4104 l = task->task_lun_no;
4498 4105 l[0] = lun[0];
4499 4106 l[1] = lun[1];
4500 4107 l[2] = lun[2];
4501 4108 l[3] = lun[3];
4502 4109 l[4] = lun[4];
4503 4110 l[5] = lun[5];
4504 4111 l[6] = lun[6];
4505 4112 l[7] = lun[7];
4506 4113 task->task_cdb = (uint8_t *)task->task_port_private;
4507 4114 if ((ulong_t)(task->task_cdb) & 7ul) {
4508 4115 task->task_cdb = (uint8_t *)(((ulong_t)
4509 4116 (task->task_cdb) + 7ul) & ~(7ul));
4510 4117 }
4511 4118 itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
4512 4119 itask->itask_cdb_buf_size = cdb_length;
4513 4120 mutex_init(&itask->itask_audit_mutex, NULL, MUTEX_DRIVER, NULL);
4514 4121 }
4515 4122 task->task_session = ss;
4516 4123 task->task_lport = lport;
4517 4124 task->task_cdb_length = cdb_length_in;
4518 4125 itask->itask_flags = ITASK_IN_TRANSITION;
4519 4126 itask->itask_waitq_time = 0;
4520 4127 itask->itask_lu_read_time = itask->itask_lu_write_time = 0;
4521 4128 itask->itask_lport_read_time = itask->itask_lport_write_time = 0;
4522 4129 itask->itask_read_xfer = itask->itask_write_xfer = 0;
4523 4130 itask->itask_audit_index = 0;
4524 4131
4525 4132 if (new_task) {
4526 4133 if (lu->lu_task_alloc(task) != STMF_SUCCESS) {
4527 4134 rw_exit(iss->iss_lockp);
4528 4135 stmf_free(task);
4529 4136 return (NULL);
4530 4137 }
4531 4138 mutex_enter(&ilu->ilu_task_lock);
4532 4139 if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
4533 4140 mutex_exit(&ilu->ilu_task_lock);
4534 4141 rw_exit(iss->iss_lockp);
4535 4142 stmf_free(task);
4536 4143 return (NULL);
4537 4144 }
4538 4145 itask->itask_lu_next = ilu->ilu_tasks;
4539 4146 if (ilu->ilu_tasks)
4540 4147 ilu->ilu_tasks->itask_lu_prev = itask;
4541 4148 ilu->ilu_tasks = itask;
4542 4149 /* kmem_zalloc automatically makes itask->itask_lu_prev NULL */
4543 4150 ilu->ilu_ntasks++;
4544 4151 mutex_exit(&ilu->ilu_task_lock);
4545 4152 }
4546 4153
4547 4154 itask->itask_ilu_task_cntr = ilu->ilu_cur_task_cntr;
4548 4155 atomic_add_32(itask->itask_ilu_task_cntr, 1);
4549 4156 itask->itask_start_time = ddi_get_lbolt();
4550 4157
4551 4158 if ((lun_map_ent != NULL) && ((itask->itask_itl_datap =
4552 4159 lun_map_ent->ent_itl_datap) != NULL)) {
4553 4160 atomic_add_32(&itask->itask_itl_datap->itl_counter, 1);
4554 4161 task->task_lu_itl_handle = itask->itask_itl_datap->itl_handle;
4555 4162 } else {
4556 4163 itask->itask_itl_datap = NULL;
4557 4164 task->task_lu_itl_handle = NULL;
4558 4165 }
4559 4166
4560 4167 rw_exit(iss->iss_lockp);
4561 4168 return (task);
4562 4169 }
4563 4170
/*
 * Return a completed task to its LU's free-task cache.  Caller must hold
 * the session lock (asserted below); the task must no longer be known to
 * the LU or the target port.
 */
static void
stmf_task_lu_free(scsi_task_t *task, stmf_i_scsi_session_t *iss)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	ASSERT(rw_lock_held(iss->iss_lockp));
	itask->itask_flags = ITASK_IN_FREE_LIST;
	itask->itask_proxy_msg_id = 0;
	mutex_enter(&ilu->ilu_task_lock);
	/* Push the task onto the head of the LU's free list. */
	itask->itask_lu_free_next = ilu->ilu_free_tasks;
	ilu->ilu_free_tasks = itask;
	ilu->ilu_ntasks_free++;
	/*
	 * If every task on this LU is now free, wake a waiter on
	 * ilu_offline_pending_cv — presumably an LU offline waiting for
	 * outstanding tasks to drain (see illumos bug 3621).
	 */
	if (ilu->ilu_ntasks == ilu->ilu_ntasks_free)
		cv_signal(&ilu->ilu_offline_pending_cv);
	mutex_exit(&ilu->ilu_task_lock);
	atomic_add_32(itask->itask_ilu_task_cntr, -1);
}
4581 4190
/*
 * Trim excess cached tasks from an LU's free list.  Releases half of the
 * observed minimum free count (ilu_ntasks_min_free): each released task is
 * popped from the free list, handed to the LU's lu_task_free entry point,
 * unlinked from the LU's global task list, and freed.
 */
void
stmf_task_lu_check_freelist(stmf_i_lu_t *ilu)
{
	uint32_t num_to_release, ndx;
	stmf_i_scsi_task_t *itask;
	stmf_lu_t *lu = ilu->ilu_lu;

	ASSERT(ilu->ilu_ntasks_min_free <= ilu->ilu_ntasks_free);

	/* free half of the minimal free of the free tasks */
	num_to_release = (ilu->ilu_ntasks_min_free + 1) / 2;
	if (!num_to_release) {
		return;
	}
	for (ndx = 0; ndx < num_to_release; ndx++) {
		/* Pop the head of the free list. */
		mutex_enter(&ilu->ilu_task_lock);
		itask = ilu->ilu_free_tasks;
		if (itask == NULL) {
			mutex_exit(&ilu->ilu_task_lock);
			break;
		}
		ilu->ilu_free_tasks = itask->itask_lu_free_next;
		ilu->ilu_ntasks_free--;
		mutex_exit(&ilu->ilu_task_lock);

		/* Call the LU's free entry point without holding the lock. */
		lu->lu_task_free(itask->itask_task);
		/* Unlink from the LU's doubly-linked list of all tasks. */
		mutex_enter(&ilu->ilu_task_lock);
		if (itask->itask_lu_next)
			itask->itask_lu_next->itask_lu_prev =
			    itask->itask_lu_prev;
		if (itask->itask_lu_prev)
			itask->itask_lu_prev->itask_lu_next =
			    itask->itask_lu_next;
		else
			ilu->ilu_tasks = itask->itask_lu_next;

		ilu->ilu_ntasks--;
		mutex_exit(&ilu->ilu_task_lock);
		stmf_free(itask->itask_task);
	}
}
4623 4232
/*
 * Called with stmf_lock held.
 *
 * Walk the LUs queued for task-cache draining and trim their free lists
 * via stmf_task_lu_check_freelist().  stmf_lock is dropped around the
 * per-LU work (ILU_STALL_DEREGISTER keeps the ilu from going away) and the
 * loop is bounded to roughly 10ms per call so the svc thread keeps moving.
 */
void
stmf_check_freetask()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_draining may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_draining) != NULL) {
		stmf_state.stmf_svc_ilu_draining = ilu->ilu_next;
		if (!ilu->ilu_ntasks_min_free) {
			/* Nothing to trim; just reset the watermark. */
			ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
			continue;
		}
		/* Hold off deregistration while we run unlocked. */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_task_lu_check_freelist(ilu);
		/*
		 * we do not care about the accuracy of
		 * ilu_ntasks_min_free, so we don't lock here
		 */
		ilu->ilu_ntasks_min_free = ilu->ilu_ntasks_free;
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4655 4264
4656 4265 void
4657 4266 stmf_do_ilu_timeouts(stmf_i_lu_t *ilu)
4658 4267 {
4659 4268 clock_t l = ddi_get_lbolt();
4660 4269 clock_t ps = drv_usectohz(1000000);
4661 4270 stmf_i_scsi_task_t *itask;
4662 4271 scsi_task_t *task;
4663 4272 uint32_t to;
4664 4273
4665 4274 mutex_enter(&ilu->ilu_task_lock);
4666 4275 for (itask = ilu->ilu_tasks; itask != NULL;
4667 4276 itask = itask->itask_lu_next) {
4668 4277 if (itask->itask_flags & (ITASK_IN_FREE_LIST |
4669 4278 ITASK_BEING_ABORTED)) {
4670 4279 continue;
4671 4280 }
4672 4281 task = itask->itask_task;
4673 4282 if (task->task_timeout == 0)
4674 4283 to = stmf_default_task_timeout;
4675 4284 else
4676 4285 to = task->task_timeout;
4677 4286 if ((itask->itask_start_time + (to * ps)) > l)
4678 4287 continue;
4679 4288 stmf_abort(STMF_QUEUE_TASK_ABORT, task,
4680 4289 STMF_TIMEOUT, NULL);
4681 4290 }
4682 4291 mutex_exit(&ilu->ilu_task_lock);
4683 4292 }
4684 4293
/*
 * Called with stmf_lock held.
 *
 * Scan the LUs queued for timing checks.  Each ilu has two task counters
 * (ilu_task_cntr1/2) and new tasks are counted against ilu_cur_task_cntr.
 * If the inactive counter has drained to zero, simply switch the current
 * counter and move on.  Otherwise tasks have remained outstanding for a
 * whole cycle — a possible slowdown — so run stmf_do_ilu_timeouts() to
 * abort tasks that exceeded their timeout.  As in stmf_check_freetask(),
 * stmf_lock is dropped around the per-LU work and the loop is bounded to
 * roughly 10ms.
 */
void
stmf_check_ilu_timing()
{
	stmf_i_lu_t *ilu;
	clock_t endtime = ddi_get_lbolt() + drv_usectohz(10000);

	/* stmf_svc_ilu_timing may get changed after stmf_lock is released */
	while ((ilu = stmf_state.stmf_svc_ilu_timing) != NULL) {
		stmf_state.stmf_svc_ilu_timing = ilu->ilu_next;
		if (ilu->ilu_cur_task_cntr == (&ilu->ilu_task_cntr1)) {
			if (ilu->ilu_task_cntr2 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr2;
				continue;
			}
		} else {
			if (ilu->ilu_task_cntr1 == 0) {
				ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
				continue;
			}
		}
		/*
		 * If we are here then it means that there is some slowdown
		 * in tasks on this lu. We need to check.
		 */
		ilu->ilu_flags |= ILU_STALL_DEREGISTER;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_do_ilu_timeouts(ilu);
		mutex_enter(&stmf_state.stmf_lock);
		ilu->ilu_flags &= ~ILU_STALL_DEREGISTER;
		cv_broadcast(&stmf_state.stmf_cv);
		if (ddi_get_lbolt() >= endtime)
			break;
	}
}
4722 4331
4723 4332 /*
4724 4333 * Kills all tasks on a lu except tm_task
4725 4334 */
4726 4335 void
4727 4336 stmf_task_lu_killall(stmf_lu_t *lu, scsi_task_t *tm_task, stmf_status_t s)
4728 4337 {
4729 4338 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
4730 4339 stmf_i_scsi_task_t *itask;
4731 4340
4732 4341 mutex_enter(&ilu->ilu_task_lock);
4733 4342
4734 4343 for (itask = ilu->ilu_tasks; itask != NULL;
4735 4344 itask = itask->itask_lu_next) {
4736 4345 if (itask->itask_flags & ITASK_IN_FREE_LIST)
4737 4346 continue;
4738 4347 if (itask->itask_task == tm_task)
4739 4348 continue;
4740 4349 stmf_abort(STMF_QUEUE_TASK_ABORT, itask->itask_task, s, NULL);
4741 4350 }
4742 4351 mutex_exit(&ilu->ilu_task_lock);
4743 4352 }
4744 4353
4745 4354 void
4746 4355 stmf_free_task_bufs(stmf_i_scsi_task_t *itask, stmf_local_port_t *lport)
4747 4356 {
4748 4357 int i;
4749 4358 uint8_t map;
4750 4359
4751 4360 if ((map = itask->itask_allocated_buf_map) == 0)
4752 4361 return;
4753 4362 for (i = 0; i < 4; i++) {
4754 4363 if (map & 1) {
4755 4364 stmf_data_buf_t *dbuf;
4756 4365
4757 4366 dbuf = itask->itask_dbufs[i];
4758 4367 if (dbuf->db_xfer_start_timestamp) {
4759 4368 stmf_lport_xfer_done(itask, dbuf);
4760 4369 }
4761 4370 if (dbuf->db_flags & DB_LU_DATA_BUF) {
4762 4371 /*
4763 4372 * LU needs to clean up buffer.
4764 4373 * LU is required to free the buffer
4765 4374 * in the xfer_done handler.
4766 4375 */
4767 4376 scsi_task_t *task = itask->itask_task;
4768 4377 stmf_lu_t *lu = task->task_lu;
4769 4378
4770 4379 lu->lu_dbuf_free(task, dbuf);
4771 4380 ASSERT(((itask->itask_allocated_buf_map>>i)
4772 4381 & 1) == 0); /* must be gone */
4773 4382 } else {
4774 4383 ASSERT(dbuf->db_lu_private == NULL);
4775 4384 dbuf->db_lu_private = NULL;
4776 4385 lport->lport_ds->ds_free_data_buf(
4777 4386 lport->lport_ds, dbuf);
4778 4387 }
4779 4388 }
4780 4389 map >>= 1;
4781 4390 }
4782 4391 itask->itask_allocated_buf_map = 0;
4783 4392 }
4784 4393
/*
 * Final teardown of a task once neither the LU nor the target port knows
 * about it: release data buffers, finish ITL/kstat accounting, drop the
 * ITL-handle reference, notify the lport, release worker references, and
 * finally return the task to the LU's free list.
 */
void
stmf_task_free(scsi_task_t *task)
{
	stmf_local_port_t *lport = task->task_lport;
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;

	stmf_task_audit(itask, TE_TASK_FREE, CMD_OR_IOF_NA, NULL);

	stmf_free_task_bufs(itask, lport);
	stmf_itl_task_done(itask);
	DTRACE_PROBE2(stmf__task__end, scsi_task_t *, task,
	    hrtime_t,
	    itask->itask_done_timestamp - itask->itask_start_timestamp);
	/* Drop our hold on the ITL handle; free it on last reference. */
	if (itask->itask_itl_datap) {
		if (atomic_add_32_nv(&itask->itask_itl_datap->itl_counter,
		    -1) == 0) {
			stmf_release_itl_handle(task->task_lu,
			    itask->itask_itl_datap);
		}
	}

	rw_enter(iss->iss_lockp, RW_READER);
	lport->lport_task_free(task);
	/* Only counted against a worker if the task was ever posted. */
	if (itask->itask_worker) {
		atomic_add_32(&stmf_cur_ntasks, -1);
		atomic_add_32(&itask->itask_worker->worker_ref_count, -1);
	}
	/*
	 * After calling stmf_task_lu_free, the task pointer can no longer
	 * be trusted.
	 */
	stmf_task_lu_free(task, iss);
	rw_exit(iss->iss_lockp);
}
4822 4431
/*
 * Hand a newly built task (optionally with an initial data buffer) to a
 * worker thread: pick a worker round-robin with a one-ahead load check,
 * mark the task known to the target port, queue it on the worker, and wake
 * the worker if idle.  If an LU reset snuck in during allocation, the task
 * is immediately queued for abort at the bottom.
 */
void
stmf_post_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	int nv;
	uint32_t old, new;
	uint32_t ct;
	stmf_worker_t *w, *w1;
	uint8_t tm;

	if (task->task_max_nbufs > 4)
		task->task_max_nbufs = 4;
	task->task_cur_nbufs = 0;
	/* Latest value of currently running tasks */
	ct = atomic_add_32_nv(&stmf_cur_ntasks, 1);

	/* Select the next worker using round robin */
	nv = (int)atomic_add_32_nv((uint32_t *)&stmf_worker_sel_counter, 1);
	if (nv >= stmf_nworkers_accepting_cmds) {
		/* Reduce the counter modulo the number of workers. */
		int s = nv;
		do {
			nv -= stmf_nworkers_accepting_cmds;
		} while (nv >= stmf_nworkers_accepting_cmds);
		if (nv < 0)
			nv = 0;
		/* Its ok if this cas fails */
		(void) atomic_cas_32((uint32_t *)&stmf_worker_sel_counter,
		    s, nv);
	}
	w = &stmf_workers[nv];

	/*
	 * A worker can be pinned by interrupt. So select the next one
	 * if it has lower load.
	 */
	if ((nv + 1) >= stmf_nworkers_accepting_cmds) {
		w1 = stmf_workers;
	} else {
		w1 = &stmf_workers[nv + 1];
	}
	if (w1->worker_queue_depth < w->worker_queue_depth)
		w = w1;

	mutex_enter(&w->worker_lock);
	if (((w->worker_flags & STMF_WORKER_STARTED) == 0) ||
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		/*
		 * Maybe we are in the middle of a change. Just go to
		 * the 1st worker.
		 */
		mutex_exit(&w->worker_lock);
		w = stmf_workers;
		mutex_enter(&w->worker_lock);
	}
	itask->itask_worker = w;
	/*
	 * Track max system load inside the worker as we already have the
	 * worker lock (no point implementing another lock). The service
	 * thread will do the comparisons and figure out the max overall
	 * system load.
	 */
	if (w->worker_max_sys_qdepth_pu < ct)
		w->worker_max_sys_qdepth_pu = ct;

	/*
	 * Atomically mark the task known to the target port and queued.
	 * Target resets and REPORT LUNS get STMF default handling.
	 */
	do {
		old = new = itask->itask_flags;
		new |= ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE;
		if (task->task_mgmt_function) {
			tm = task->task_mgmt_function;
			if ((tm == TM_TARGET_RESET) ||
			    (tm == TM_TARGET_COLD_RESET) ||
			    (tm == TM_TARGET_WARM_RESET)) {
				new |= ITASK_DEFAULT_HANDLING;
			}
		} else if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
			new |= ITASK_DEFAULT_HANDLING;
		}
		new &= ~ITASK_IN_TRANSITION;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	stmf_itl_task_start(itask);

	/* Append the task to the worker's queue (holding worker_lock). */
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	/* Measure task waitq time */
	itask->itask_waitq_enter_timestamp = gethrtime();
	atomic_add_32(&w->worker_ref_count, 1);
	itask->itask_cmd_stack[0] = ITASK_CMD_NEW_TASK;
	itask->itask_ncmds = 1;
	stmf_task_audit(itask, TE_TASK_START, CMD_OR_IOF_NA, dbuf);
	if (dbuf) {
		itask->itask_allocated_buf_map = 1;
		itask->itask_dbufs[0] = dbuf;
		dbuf->db_handle = 0;
	} else {
		itask->itask_allocated_buf_map = 0;
		itask->itask_dbufs[0] = NULL;
	}

	/* Wake the worker if it is idle. */
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) {
		w->worker_signal_timestamp = gethrtime();
		DTRACE_PROBE2(worker__signal, stmf_worker_t *, w,
		    scsi_task_t *, task);
		cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	/*
	 * This can only happen if during stmf_task_alloc(), ILU_RESET_ACTIVE
	 * was set between checking of ILU_RESET_ACTIVE and clearing of the
	 * ITASK_IN_FREE_LIST flag. Take care of these "sneaked-in" tasks here.
	 */
	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ABORTED, NULL);
	}
}
4949 4558
4950 4559 static void
4951 4560 stmf_task_audit(stmf_i_scsi_task_t *itask,
4952 4561 task_audit_event_t te, uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
4953 4562 {
4954 4563 stmf_task_audit_rec_t *ar;
4955 4564
4956 4565 mutex_enter(&itask->itask_audit_mutex);
4957 4566 ar = &itask->itask_audit_records[itask->itask_audit_index++];
4958 4567 itask->itask_audit_index &= (ITASK_TASK_AUDIT_DEPTH - 1);
4959 4568 ar->ta_event = te;
4960 4569 ar->ta_cmd_or_iof = cmd_or_iof;
4961 4570 ar->ta_itask_flags = itask->itask_flags;
4962 4571 ar->ta_dbuf = dbuf;
4963 4572 gethrestime(&ar->ta_timestamp);
4964 4573 mutex_exit(&itask->itask_audit_mutex);
4965 4574 }
4966 4575
4967 4576
/*
 * ++++++++++++++ ABORT LOGIC ++++++++++++++++++++
 * ITASK_KNOWN_TO_LU may already have been reset before ITASK_BEING_ABORTED
 * is set.  But if it was still set at that point, it cannot be reset until
 * the LU explicitly calls stmf_task_lu_aborted().  Of course, the LU will
 * make that call only if we invoke the LU's abort entry point, and we call
 * that entry point only if ITASK_KNOWN_TO_LU was set.
 *
 * The same logic applies to the port.
 *
 * Also, ITASK_BEING_ABORTED is not allowed to be set if both KNOWN_TO_LU
 * and KNOWN_TO_TGT_PORT have been reset.
 *
 * +++++++++++++++++++++++++++++++++++++++++++++++
 */
4983 4592
/*
 * Initiate a data transfer for the task through the target port's
 * lport_xfer_data entry point.  STMF_IOF_LU_DONE clears KNOWN_TO_LU first;
 * STMF_IOF_STATS_ONLY only updates kstat/transfer accounting without
 * performing an actual transfer.  Returns STMF_ABORTED if the task is
 * being aborted.
 */
stmf_status_t
stmf_xfer_data(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t ioflags)
{
	stmf_status_t ret = STMF_SUCCESS;

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_XFER_START, ioflags, dbuf);

	if (ioflags & STMF_IOF_LU_DONE) {
		/* Atomically clear KNOWN_TO_LU, bailing out on abort. */
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}
	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);
#ifdef	DEBUG
	/*
	 * DEBUG fault injection: while stmf_drop_buf_counter is positive,
	 * pretend success without actually issuing the transfer.
	 */
	if (!(ioflags & STMF_IOF_STATS_ONLY) && stmf_drop_buf_counter > 0) {
		if (atomic_add_32_nv((uint32_t *)&stmf_drop_buf_counter, -1) ==
		    1)
			return (STMF_SUCCESS);
	}
#endif

	stmf_update_kstat_lu_io(task, dbuf);
	stmf_update_kstat_lport_io(task, dbuf);
	stmf_lport_xfer_start(itask, dbuf);
	if (ioflags & STMF_IOF_STATS_ONLY) {
		stmf_lport_xfer_done(itask, dbuf);
		return (STMF_SUCCESS);
	}

	dbuf->db_flags |= DB_LPORT_XFER_ACTIVE;
	ret = task->task_lport->lport_xfer_data(task, dbuf, ioflags);

	/*
	 * Port provider may have already called the buffer callback in
	 * which case dbuf->db_xfer_start_timestamp will be 0.
	 */
	if (ret != STMF_SUCCESS) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
		if (dbuf->db_xfer_start_timestamp != 0)
			stmf_lport_xfer_done(itask, dbuf);
	}

	return (ret);
}
5036 4645
/*
 * Completion callback for a data transfer issued via stmf_xfer_data().
 * Updates the task flags atomically and then, depending on state: queues a
 * DATA_XFER_DONE command to the worker (if the LU still owns the task),
 * merely records the buffer completion (if already queued), or frees the
 * task (if nobody knows about it any more).
 */
void
stmf_data_xfer_done(scsi_task_t *task, stmf_data_buf_t *dbuf, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_local_port_t *ilport;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t update_queue_flags, free_it, queue_it;

	stmf_lport_xfer_done(itask, dbuf);

	stmf_task_audit(itask, TE_XFER_DONE, iof, dbuf);

	/* Guard against unexpected completions from the lport */
	if (dbuf->db_flags & DB_LPORT_XFER_ACTIVE) {
		dbuf->db_flags &= ~DB_LPORT_XFER_ACTIVE;
	} else {
		/*
		 * This should never happen.
		 */
		ilport = task->task_lport->lport_stmf_private;
		ilport->ilport_unexpected_comp++;
		cmn_err(CE_PANIC, "Unexpected xfer completion task %p dbuf %p",
		    (void *)task, (void *)dbuf);
		return;
	}

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			task->task_completion_status = dbuf->db_xfer_status;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			update_queue_flags = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				queue_it = 0;
			} else {
				queue_it = 1;
				new |= ITASK_IN_WORKER_QUEUE;
			}
		} else {
			update_queue_flags = 0;
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (update_queue_flags) {
		/* db_handle is encoded in the upper bits of the cmd byte. */
		uint8_t cmd = (dbuf->db_handle << 5) | ITASK_CMD_DATA_XFER_DONE;

		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] = cmd;
		if (queue_it) {
			/* Append the task to the worker's queue. */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
			/* Measure task waitq time */
			itask->itask_waitq_enter_timestamp = gethrtime();
			if (++(w->worker_queue_depth) >
			    w->worker_max_qdepth_pu) {
				w->worker_max_qdepth_pu = w->worker_queue_depth;
			}
			if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
				cv_signal(&w->worker_cv);
		}
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		}
	}
}
5134 4743
/*
 * Send SCSI status for the task through the target port's
 * lport_send_status entry point.  STMF_IOF_LU_DONE clears KNOWN_TO_LU
 * first.  Computes residual (over/underrun) from the command, expected and
 * actual transfer lengths before handing off.  Returns STMF_ABORTED if the
 * task is being aborted, STMF_SUCCESS without sending if the target port
 * no longer knows the task.
 */
stmf_status_t
stmf_send_scsi_status(scsi_task_t *task, uint32_t ioflags)
{
	DTRACE_PROBE1(scsi__send__status, scsi_task_t *, task);

	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	stmf_task_audit(itask, TE_SEND_STATUS, ioflags, NULL);

	if (ioflags & STMF_IOF_LU_DONE) {
		/* Atomically clear KNOWN_TO_LU, bailing out on abort. */
		uint32_t new, old;
		do {
			new = old = itask->itask_flags;
			if (new & ITASK_BEING_ABORTED)
				return (STMF_ABORTED);
			new &= ~ITASK_KNOWN_TO_LU;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	}

	if (!(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT)) {
		return (STMF_SUCCESS);
	}

	if (itask->itask_flags & ITASK_BEING_ABORTED)
		return (STMF_ABORTED);

	/* Work out residual/overrun/underrun status for the response. */
	if (task->task_additional_flags & TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	} else if (task->task_cmd_xfer_length >
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_OVER;
		task->task_resid = task->task_cmd_xfer_length -
		    task->task_expected_xfer_length;
	} else if (task->task_nbytes_transferred <
	    task->task_expected_xfer_length) {
		task->task_status_ctrl = TASK_SCTRL_UNDER;
		task->task_resid = task->task_expected_xfer_length -
		    task->task_nbytes_transferred;
	} else {
		task->task_status_ctrl = 0;
		task->task_resid = 0;
	}
	return (task->task_lport->lport_send_status(task, ioflags));
}
5181 4790
/*
 * Completion callback for stmf_send_scsi_status().  If the LU still owns
 * the task, a STATUS_DONE command is queued to the worker (a duplicate
 * completion while queued is a panic).  If STMF_IOF_LPORT_DONE cleared the
 * last interest in the task, it is freed; at this stage the LU must be
 * done with the task, otherwise we panic.
 */
void
stmf_send_status_done(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;
	uint8_t free_it, queue_it;

	stmf_task_audit(itask, TE_SEND_STATUS_DONE, iof, NULL);

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		free_it = 0;
		if (iof & STMF_IOF_LPORT_DONE) {
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
			free_it = 1;
		}
		/*
		 * If the task is known to LU then queue it. But if
		 * it is already queued (multiple completions) then
		 * just update the buffer information by grabbing the
		 * worker lock. If the task is not known to LU,
		 * completed/aborted, then see if we need to
		 * free this task.
		 */
		if (old & ITASK_KNOWN_TO_LU) {
			free_it = 0;
			queue_it = 1;
			if (old & ITASK_IN_WORKER_QUEUE) {
				cmn_err(CE_PANIC, "status completion received"
				    " when task is already in worker queue "
				    " task = %p", (void *)task);
			}
			new |= ITASK_IN_WORKER_QUEUE;
		} else {
			queue_it = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;


	if (queue_it) {
		ASSERT(itask->itask_ncmds < ITASK_MAX_NCMDS);
		itask->itask_cmd_stack[itask->itask_ncmds++] =
		    ITASK_CMD_STATUS_DONE;
		/* Append the task to the worker's queue. */
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		/* Measure task waitq time */
		itask->itask_waitq_enter_timestamp = gethrtime();
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);

	if (free_it) {
		if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
		    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
		    ITASK_BEING_ABORTED)) == 0) {
			stmf_task_free(task);
		} else {
			cmn_err(CE_PANIC, "LU is done with the task but LPORT "
			    " is not done, itask %p itask_flags %x",
			    (void *)itask, itask->itask_flags);
		}
	}
}
5262 4871
/*
 * Called when the LU is completely done with a task.  Clears KNOWN_TO_LU;
 * since LU-done is expected to be the final stage, the task must then have
 * no remaining interest (LU, target port, worker queue, abort) or we
 * panic.  On success the task is freed.
 */
void
stmf_task_lu_done(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	uint32_t new, old;

	mutex_enter(&w->worker_lock);
	do {
		new = old = itask->itask_flags;
		if (old & ITASK_BEING_ABORTED) {
			mutex_exit(&w->worker_lock);
			return;
		}
		if (old & ITASK_IN_WORKER_QUEUE) {
			cmn_err(CE_PANIC, "task_lu_done received"
			    " when task is in worker queue "
			    " task = %p", (void *)task);
		}
		new &= ~ITASK_KNOWN_TO_LU;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	mutex_exit(&w->worker_lock);

	if ((itask->itask_flags & (ITASK_KNOWN_TO_LU |
	    ITASK_KNOWN_TO_TGT_PORT | ITASK_IN_WORKER_QUEUE |
	    ITASK_BEING_ABORTED)) == 0) {
		stmf_task_free(task);
	} else {
		cmn_err(CE_PANIC, "stmf_lu_done should be the last stage but "
		    " the task is still not done, task = %p", (void *)task);
	}
}
5297 4906
/*
 * Mark a task as being aborted (with completion status s) and queue it to
 * its worker so the abort is processed.  No-op if an abort is already in
 * progress or nobody (LU or target port) knows the task.  If the task has
 * no worker yet or is still in transition, only the flag is set; the
 * normal path will notice it later.
 */
void
stmf_queue_task_for_abort(scsi_task_t *task, stmf_status_t s)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_worker_t *w;
	uint32_t old, new;

	stmf_task_audit(itask, TE_TASK_ABORT, CMD_OR_IOF_NA, NULL);

	do {
		old = new = itask->itask_flags;
		if ((old & ITASK_BEING_ABORTED) ||
		    ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_KNOWN_TO_LU)) == 0)) {
			return;
		}
		new |= ITASK_BEING_ABORTED;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	task->task_completion_status = s;
	/* Restart the clock so abort processing gets its own timeout. */
	itask->itask_start_time = ddi_get_lbolt();

	if (((w = itask->itask_worker) == NULL) ||
	    (itask->itask_flags & ITASK_IN_TRANSITION)) {
		return;
	}

	/* Queue it and get out */
	mutex_enter(&w->worker_lock);
	if (itask->itask_flags & ITASK_IN_WORKER_QUEUE) {
		mutex_exit(&w->worker_lock);
		return;
	}
	atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
	itask->itask_worker_next = NULL;
	if (w->worker_task_tail) {
		w->worker_task_tail->itask_worker_next = itask;
	} else {
		w->worker_task_head = itask;
	}
	w->worker_task_tail = itask;
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
		w->worker_max_qdepth_pu = w->worker_queue_depth;
	}
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
		cv_signal(&w->worker_cv);
	mutex_exit(&w->worker_lock);
}
5346 4955
/*
 * Abort dispatcher.  STMF_QUEUE_ABORT_LU kills all tasks on the LU in
 * 'arg'; STMF_QUEUE_TASK_ABORT queues a single task for abort.  The
 * STMF_REQUEUE_TASK_ABORT_{LPORT,LU} variants clear the corresponding
 * "abort entry point called" flag so the abort will be retried, but only
 * if the task is still being aborted and still known to that side.
 */
void
stmf_abort(int abort_cmd, scsi_task_t *task, stmf_status_t s, void *arg)
{
	stmf_i_scsi_task_t *itask = NULL;
	uint32_t old, new, f, rf;

	DTRACE_PROBE2(scsi__task__abort, scsi_task_t *, task,
	    stmf_status_t, s);

	switch (abort_cmd) {
	case STMF_QUEUE_ABORT_LU:
		stmf_task_lu_killall((stmf_lu_t *)arg, task, s);
		return;
	case STMF_QUEUE_TASK_ABORT:
		stmf_queue_task_for_abort(task, s);
		return;
	case STMF_REQUEUE_TASK_ABORT_LPORT:
		rf = ITASK_TGT_PORT_ABORT_CALLED;
		f = ITASK_KNOWN_TO_TGT_PORT;
		break;
	case STMF_REQUEUE_TASK_ABORT_LU:
		rf = ITASK_LU_ABORT_CALLED;
		f = ITASK_KNOWN_TO_LU;
		break;
	default:
		return;
	}
	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	f |= ITASK_BEING_ABORTED | rf;
	/* Atomically clear rf, but only while all bits in f remain set. */
	do {
		old = new = itask->itask_flags;
		if ((old & f) != f) {
			return;
		}
		new &= ~rf;
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
}
5384 4993
/*
 * Called when the LU reports the outcome of an abort.  On clean success
 * with STMF_IOF_LU_DONE, just clear KNOWN_TO_LU.  Any other outcome
 * (abort failed, or LU not finished) takes the offline path via
 * stmf_abort_task_offline() with a descriptive message.
 */
void
stmf_task_lu_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char			info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t	*itask = TASK_TO_ITASK(task);
	unsigned long long	st;

	stmf_task_audit(itask, TE_TASK_LU_ABORTED, iof, NULL);

	st = s;	/* gcc fix */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, lu failed to abort ret=%llx", (void *)task, st);
	} else if ((iof & STMF_IOF_LU_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but LU is not finished, task ="
		    "%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LU abort successfully
		 */
		atomic_and_32(&itask->itask_flags, ~ITASK_KNOWN_TO_LU);
		return;
	}

	stmf_abort_task_offline(task, 1, info);
}
5412 5021
/*
 * Completion callback for a target-port-side task abort.  Mirrors
 * stmf_task_lu_aborted() but clears ITASK_KNOWN_TO_TGT_PORT with a CAS
 * loop (bailing out if the flag is already gone) instead of a blind
 * atomic AND.
 */
void
stmf_task_lport_aborted(scsi_task_t *task, stmf_status_t s, uint32_t iof)
{
	char info[STMF_CHANGE_INFO_LEN];
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	unsigned long long st;
	uint32_t old, new;

	stmf_task_audit(itask, TE_TASK_LPORT_ABORTED, iof, NULL);

	st = s;	/* widen for the %llx format below */
	if ((s != STMF_ABORT_SUCCESS) && (s != STMF_NOT_FOUND)) {
		(void) snprintf(info, sizeof (info),
		    "task %p, tgt port failed to abort ret=%llx", (void *)task,
		    st);
	} else if ((iof & STMF_IOF_LPORT_DONE) == 0) {
		(void) snprintf(info, sizeof (info),
		    "Task aborted but tgt port is not finished, "
		    "task=%p, s=%llx, iof=%x", (void *)task, st, iof);
	} else {
		/*
		 * LPORT abort successfully
		 */
		do {
			old = new = itask->itask_flags;
			if (!(old & ITASK_KNOWN_TO_TGT_PORT))
				return;
			new &= ~ITASK_KNOWN_TO_TGT_PORT;
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
		return;
	}

	stmf_abort_task_offline(task, 0, info);
}
5447 5056
/*
 * Schedule a delayed ITASK_CMD_POLL_LU for this task so its worker will
 * call back into the LU.  timeout is in milliseconds;
 * ITASK_DEFAULT_POLL_TIMEOUT means "next tick".  Returns STMF_BUSY when
 * the per-task command stack is full, STMF_SUCCESS otherwise (including
 * when a poll request is already pending).
 */
stmf_status_t
stmf_task_poll_lu(scsi_task_t *task, uint32_t timeout)
{
	stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
	    task->task_stmf_private;
	stmf_worker_t *w = itask->itask_worker;
	int i;

	ASSERT(itask->itask_flags & ITASK_KNOWN_TO_LU);
	mutex_enter(&w->worker_lock);
	if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
		mutex_exit(&w->worker_lock);
		return (STMF_BUSY);
	}
	/* Coalesce: if a poll request is already stacked, do nothing. */
	for (i = 0; i < itask->itask_ncmds; i++) {
		if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LU) {
			mutex_exit(&w->worker_lock);
			return (STMF_SUCCESS);
		}
	}
	itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LU;
	if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
		itask->itask_poll_timeout = ddi_get_lbolt() + 1;
	} else {
		clock_t t = drv_usectohz(timeout * 1000);
		if (t == 0)
			t = 1;	/* never schedule in the past */
		itask->itask_poll_timeout = ddi_get_lbolt() + t;
	}
	if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
		/* Append to the worker's task queue and wake it if idle. */
		itask->itask_worker_next = NULL;
		if (w->worker_task_tail) {
			w->worker_task_tail->itask_worker_next = itask;
		} else {
			w->worker_task_head = itask;
		}
		w->worker_task_tail = itask;
		if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
			w->worker_max_qdepth_pu = w->worker_queue_depth;
		}
		atomic_or_32(&itask->itask_flags, ITASK_IN_WORKER_QUEUE);
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
	}
	mutex_exit(&w->worker_lock);
	return (STMF_SUCCESS);
}
5495 5104
5496 5105 stmf_status_t
5497 5106 stmf_task_poll_lport(scsi_task_t *task, uint32_t timeout)
5498 5107 {
5499 5108 stmf_i_scsi_task_t *itask = (stmf_i_scsi_task_t *)
5500 5109 task->task_stmf_private;
5501 5110 stmf_worker_t *w = itask->itask_worker;
5502 5111 int i;
5503 5112
5504 5113 ASSERT(itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT);
5505 5114 mutex_enter(&w->worker_lock);
5506 5115 if (itask->itask_ncmds >= ITASK_MAX_NCMDS) {
5507 5116 mutex_exit(&w->worker_lock);
5508 5117 return (STMF_BUSY);
5509 5118 }
5510 5119 for (i = 0; i < itask->itask_ncmds; i++) {
5511 5120 if (itask->itask_cmd_stack[i] == ITASK_CMD_POLL_LPORT) {
5512 5121 mutex_exit(&w->worker_lock);
5513 5122 return (STMF_SUCCESS);
5514 5123 }
5515 5124 }
5516 5125 itask->itask_cmd_stack[itask->itask_ncmds++] = ITASK_CMD_POLL_LPORT;
5517 5126 if (timeout == ITASK_DEFAULT_POLL_TIMEOUT) {
5518 5127 itask->itask_poll_timeout = ddi_get_lbolt() + 1;
5519 5128 } else {
5520 5129 clock_t t = drv_usectohz(timeout * 1000);
5521 5130 if (t == 0)
5522 5131 t = 1;
5523 5132 itask->itask_poll_timeout = ddi_get_lbolt() + t;
5524 5133 }
5525 5134 if ((itask->itask_flags & ITASK_IN_WORKER_QUEUE) == 0) {
5526 5135 itask->itask_worker_next = NULL;
5527 5136 if (w->worker_task_tail) {
5528 5137 w->worker_task_tail->itask_worker_next = itask;
5529 5138 } else {
5530 5139 w->worker_task_head = itask;
5531 5140 }
5532 5141 w->worker_task_tail = itask;
5533 5142 if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) {
5534 5143 w->worker_max_qdepth_pu = w->worker_queue_depth;
5535 5144 }
5536 5145 if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
5537 5146 cv_signal(&w->worker_cv);
5538 5147 }
5539 5148 mutex_exit(&w->worker_lock);
5540 5149 return (STMF_SUCCESS);
5541 5150 }
5542 5151
/*
 * Worker-side abort driver for a single task.  For each side (LU, then
 * target port) that still knows about the task:
 *  - if its abort handler has not been called yet, atomically set the
 *    "abort called" flag and invoke the handler, interpreting the result;
 *  - otherwise, if the handler was called but has not completed within
 *    the provider's abort timeout, force the side offline.
 */
void
stmf_do_task_abort(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
	stmf_lu_t *lu;
	stmf_local_port_t *lport;
	unsigned long long ret;
	uint32_t old, new;
	uint8_t call_lu_abort, call_port_abort;
	char info[STMF_CHANGE_INFO_LEN];

	lu = task->task_lu;
	lport = task->task_lport;
	/*
	 * Atomically claim the right to call the LU's abort handler:
	 * only if the task is known to the LU and no abort call is in
	 * flight yet.
	 */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_LU | ITASK_LU_ABORT_CALLED)) ==
		    ITASK_KNOWN_TO_LU) {
			new |= ITASK_LU_ABORT_CALLED;
			call_lu_abort = 1;
		} else {
			call_lu_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

	if (call_lu_abort) {
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0) {
			ret = lu->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		} else {
			/* Task is under default (dlun0) handling. */
			ret = dlun0->lu_abort(lu, STMF_LU_ABORT_TASK, task, 0);
		}
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lu_aborted(task, ret, STMF_IOF_LU_DONE);
		} else if (ret == STMF_BUSY) {
			/* Allow the abort call to be retried later. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_LU_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by LU %p, ret %llx", (void *)lu, ret);
			stmf_abort_task_offline(task, 1, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_LU) {
		/* Abort already in progress; enforce the LU's timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lu->lu_abort_timeout?
		    lu->lu_abort_timeout : ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lu abort timed out");
			stmf_abort_task_offline(itask->itask_task, 1, info);
		}
	}

	/* Same two-phase protocol for the target port side. */
	do {
		old = new = itask->itask_flags;
		if ((old & (ITASK_KNOWN_TO_TGT_PORT |
		    ITASK_TGT_PORT_ABORT_CALLED)) == ITASK_KNOWN_TO_TGT_PORT) {
			new |= ITASK_TGT_PORT_ABORT_CALLED;
			call_port_abort = 1;
		} else {
			call_port_abort = 0;
		}
	} while (atomic_cas_32(&itask->itask_flags, old, new) != old);
	if (call_port_abort) {
		ret = lport->lport_abort(lport, STMF_LPORT_ABORT_TASK, task, 0);
		if ((ret == STMF_ABORT_SUCCESS) || (ret == STMF_NOT_FOUND)) {
			stmf_task_lport_aborted(task, ret, STMF_IOF_LPORT_DONE);
		} else if (ret == STMF_BUSY) {
			/* Allow the abort call to be retried later. */
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_TGT_PORT_ABORT_CALLED);
		} else if (ret != STMF_SUCCESS) {
			(void) snprintf(info, sizeof (info),
			    "Abort failed by tgt port %p ret %llx",
			    (void *)lport, ret);
			stmf_abort_task_offline(task, 0, info);
		}
	} else if (itask->itask_flags & ITASK_KNOWN_TO_TGT_PORT) {
		/* Abort already in progress; enforce the port's timeout. */
		if (ddi_get_lbolt() > (itask->itask_start_time +
		    STMF_SEC2TICK(lport->lport_abort_timeout?
		    lport->lport_abort_timeout :
		    ITASK_DEFAULT_ABORT_TIMEOUT))) {
			(void) snprintf(info, sizeof (info),
			    "lport abort timed out");
			stmf_abort_task_offline(itask->itask_task, 0, info);
		}
	}
}
5627 5236
/*
 * State machine for LU and local-port online/offline transitions.
 * 'cmd' encodes both the object class (STMF_CMD_LU_OP / LPORT_OP bits)
 * and the transition.  The *_ONLINE / *_OFFLINE commands validate the
 * current state, move the object into the transitional state
 * (ONLINING/OFFLINING) and queue the real work to the stmf service
 * thread; the *_COMPLETE commands finalize the transition based on
 * st_completion_status supplied by the provider.
 *
 * stmf_state.stmf_lock is held throughout except around provider
 * callbacks (lu_ctl/lport_ctl) and stmf_svc_queue(), where it is
 * dropped and (for the callbacks) re-acquired.
 */
stmf_status_t
stmf_ctl(int cmd, void *obj, void *arg)
{
	stmf_status_t ret;
	stmf_i_lu_t *ilu;
	stmf_i_local_port_t *ilport;
	stmf_state_change_info_t *ssci = (stmf_state_change_info_t *)arg;

	mutex_enter(&stmf_state.stmf_lock);
	ret = STMF_INVALID_ARG;
	if (cmd & STMF_CMD_LU_OP) {
		ilu = stmf_lookup_lu((stmf_lu_t *)obj);
		if (ilu == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lu__state__change,
		    stmf_lu_t *, ilu->ilu_lu,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else if (cmd & STMF_CMD_LPORT_OP) {
		ilport = stmf_lookup_lport((stmf_local_port_t *)obj);
		if (ilport == NULL) {
			goto stmf_ctl_lock_exit;
		}
		DTRACE_PROBE3(lport__state__change,
		    stmf_local_port_t *, ilport->ilport_lport,
		    int, cmd, stmf_state_change_info_t *, ssci);
	} else {
		goto stmf_ctl_lock_exit;
	}

	switch (cmd) {
	case STMF_CMD_LU_ONLINE:
		/* Only an OFFLINE LU may be brought online. */
		switch (ilu->ilu_state) {
		case STMF_STATE_OFFLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_ONLINE:
		case STMF_STATE_ONLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_OFFLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilu->ilu_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_ONLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_ONLINE;
			/* Drop the lock across the provider ack callback. */
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		} else {
			/* XXX: should throw a message and record more data */
			ilu->ilu_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LU_OFFLINE:
		switch (ilu->ilu_state) {
		case STMF_STATE_ONLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_OFFLINE:
		case STMF_STATE_OFFLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_ONLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;
		ilu->ilu_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LU_OFFLINE_COMPLETE:
		if (ilu->ilu_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilu->ilu_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_lu_t *)obj)->lu_ctl((stmf_lu_t *)obj,
			    STMF_ACK_LU_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			/* Offline failed: the LU stays usable. */
			ilu->ilu_state = STMF_STATE_ONLINE;
			stmf_add_lu_to_active_sessions((stmf_lu_t *)obj);
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	/*
	 * LPORT_ONLINE/OFFLINE has nothing to do with link offline/online.
	 * It's related with hardware disable/enable.
	 */
	case STMF_CMD_LPORT_ONLINE:
		switch (ilport->ilport_state) {
		case STMF_STATE_OFFLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_ONLINE:
		case STMF_STATE_ONLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_OFFLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		/*
		 * Only user request can recover the port from the
		 * FORCED_OFFLINE state
		 */
		if (ilport->ilport_flags & ILPORT_FORCED_OFFLINE) {
			if (!(ssci->st_rflags & STMF_RFLAG_USER_REQUEST)) {
				ret = STMF_FAILURE;
				goto stmf_ctl_lock_exit;
			}
		}

		/*
		 * Avoid too frequent request to online
		 */
		if (ssci->st_rflags & STMF_RFLAG_USER_REQUEST) {
			/* Explicit user request resets the rate tracking. */
			ilport->ilport_online_times = 0;
			ilport->ilport_avg_interval = 0;
		}
		if ((ilport->ilport_avg_interval < STMF_AVG_ONLINE_INTERVAL) &&
		    (ilport->ilport_online_times >= 4)) {
			ret = STMF_FAILURE;
			ilport->ilport_flags |= ILPORT_FORCED_OFFLINE;
			stmf_trace(NULL, "stmf_ctl: too frequent request to "
			    "online the port");
			cmn_err(CE_WARN, "stmf_ctl: too frequent request to "
			    "online the port, set FORCED_OFFLINE now");
			goto stmf_ctl_lock_exit;
		}
		/* Maintain a running average of the online intervals. */
		if (ilport->ilport_online_times > 0) {
			if (ilport->ilport_online_times == 1) {
				ilport->ilport_avg_interval = ddi_get_lbolt() -
				    ilport->ilport_last_online_clock;
			} else {
				ilport->ilport_avg_interval =
				    (ilport->ilport_avg_interval +
				    ddi_get_lbolt() -
				    ilport->ilport_last_online_clock) >> 1;
			}
		}
		ilport->ilport_last_online_clock = ddi_get_lbolt();
		ilport->ilport_online_times++;

		/*
		 * Submit online service request
		 */
		ilport->ilport_flags &= ~ILPORT_FORCED_OFFLINE;
		ilport->ilport_state = STMF_STATE_ONLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_ONLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_ONLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_ONLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_ONLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_OFFLINE;
		}
		ret = STMF_SUCCESS;
		goto stmf_ctl_lock_exit;

	case STMF_CMD_LPORT_OFFLINE:
		switch (ilport->ilport_state) {
		case STMF_STATE_ONLINE:
			ret = STMF_SUCCESS;
			break;
		case STMF_STATE_OFFLINE:
		case STMF_STATE_OFFLINING:
			ret = STMF_ALREADY;
			break;
		case STMF_STATE_ONLINING:
			ret = STMF_BUSY;
			break;
		default:
			ret = STMF_BADSTATE;
			break;
		}
		if (ret != STMF_SUCCESS)
			goto stmf_ctl_lock_exit;

		ilport->ilport_state = STMF_STATE_OFFLINING;
		mutex_exit(&stmf_state.stmf_lock);
		stmf_svc_queue(cmd, obj, (stmf_state_change_info_t *)arg);
		break;

	case STMF_CMD_LPORT_OFFLINE_COMPLETE:
		if (ilport->ilport_state != STMF_STATE_OFFLINING) {
			ret = STMF_BADSTATE;
			goto stmf_ctl_lock_exit;
		}
		if (((stmf_change_status_t *)arg)->st_completion_status ==
		    STMF_SUCCESS) {
			ilport->ilport_state = STMF_STATE_OFFLINE;
			mutex_exit(&stmf_state.stmf_lock);
			((stmf_local_port_t *)obj)->lport_ctl(
			    (stmf_local_port_t *)obj,
			    STMF_ACK_LPORT_OFFLINE_COMPLETE, arg);
			mutex_enter(&stmf_state.stmf_lock);
		} else {
			ilport->ilport_state = STMF_STATE_ONLINE;
		}
		mutex_exit(&stmf_state.stmf_lock);
		break;

	default:
		cmn_err(CE_WARN, "Invalid ctl cmd received %x", cmd);
		ret = STMF_INVALID_ARG;
		goto stmf_ctl_lock_exit;
	}

	/* Reached via break: the lock was already dropped in the case. */
	return (STMF_SUCCESS);

stmf_ctl_lock_exit:;
	mutex_exit(&stmf_state.stmf_lock);
	return (ret);
}
5893 5502
/* ARGSUSED */
/*
 * Placeholder for SI_STMF-class info requests; nothing is implemented
 * at the framework level yet.
 */
stmf_status_t
stmf_info_impl(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
    uint32_t *bufsizep)
{
	return (STMF_NOT_SUPPORTED);
}
5901 5510
5902 5511 /* ARGSUSED */
5903 5512 stmf_status_t
5904 5513 stmf_info(uint32_t cmd, void *arg1, void *arg2, uint8_t *buf,
5905 5514 uint32_t *bufsizep)
5906 5515 {
5907 5516 uint32_t cl = SI_GET_CLASS(cmd);
5908 5517
5909 5518 if (cl == SI_STMF) {
5910 5519 return (stmf_info_impl(cmd, arg1, arg2, buf, bufsizep));
5911 5520 }
5912 5521 if (cl == SI_LPORT) {
5913 5522 return (((stmf_local_port_t *)arg1)->lport_info(cmd, arg1,
5914 5523 arg2, buf, bufsizep));
5915 5524 } else if (cl == SI_LU) {
5916 5525 return (((stmf_lu_t *)arg1)->lu_info(cmd, arg1, arg2, buf,
5917 5526 bufsizep));
5918 5527 }
5919 5528
5920 5529 return (STMF_NOT_SUPPORTED);
5921 5530 }
5922 5531
5923 5532 /*
5924 5533 * Used by port providers. pwwn is 8 byte wwn, sdid is the devid used by
5925 5534 * stmf to register local ports. The ident should have 20 bytes in buffer
5926 5535 * space to convert the wwn to "wwn.xxxxxxxxxxxxxxxx" string.
5927 5536 */
5928 5537 void
5929 5538 stmf_wwn_to_devid_desc(scsi_devid_desc_t *sdid, uint8_t *wwn,
5930 5539 uint8_t protocol_id)
5931 5540 {
5932 5541 char wwn_str[20+1];
5933 5542
5934 5543 sdid->protocol_id = protocol_id;
5935 5544 sdid->piv = 1;
5936 5545 sdid->code_set = CODE_SET_ASCII;
5937 5546 sdid->association = ID_IS_TARGET_PORT;
5938 5547 sdid->ident_length = 20;
5939 5548 /* Convert wwn value to "wwn.XXXXXXXXXXXXXXXX" format */
5940 5549 (void) snprintf(wwn_str, sizeof (wwn_str),
5941 5550 "wwn.%02X%02X%02X%02X%02X%02X%02X%02X",
5942 5551 wwn[0], wwn[1], wwn[2], wwn[3], wwn[4], wwn[5], wwn[6], wwn[7]);
5943 5552 bcopy(wwn_str, (char *)sdid->ident, 20);
5944 5553 }
5945 5554
5946 5555
/*
 * Build the REPORT TARGET PORT GROUPS response payload.  Active ports go
 * into the local node's group; if any standby ports exist and the LU is
 * in ALUA mode, a second (standby) group is appended.  Returns a
 * kmem-allocated stmf_xfer_data_t the caller must free (alloc_size), or
 * NULL on allocation failure.
 */
stmf_xfer_data_t *
stmf_prepare_tpgs_data(uint8_t ilu_alua)
{
	stmf_xfer_data_t *xd;
	stmf_i_local_port_t *ilport;
	uint8_t *p;
	uint32_t sz, asz, nports = 0, nports_standby = 0;

	mutex_enter(&stmf_state.stmf_lock);
	/* check if any ports are standby and create second group */
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			nports_standby++;
		} else {
			nports++;
		}
	}

	/* The spec only allows for 255 ports to be reported per group */
	nports = min(nports, 255);
	nports_standby = min(nports_standby, 255);
	/* 4-byte header + 8-byte group descriptor + 4 bytes per port */
	sz = (nports * 4) + 12;
	if (nports_standby && ilu_alua) {
		sz += (nports_standby * 4) + 8;
	}
	asz = sz + sizeof (*xd) - 4;
	xd = (stmf_xfer_data_t *)kmem_zalloc(asz, KM_NOSLEEP);
	if (xd == NULL) {
		mutex_exit(&stmf_state.stmf_lock);
		return (NULL);
	}
	xd->alloc_size = asz;
	xd->size_left = sz;

	p = xd->buf;

	*((uint32_t *)p) = BE_32(sz - 4);
	p += 4;
	p[0] = 0x80;	/* PREF */
	p[1] = 5;	/* AO_SUP, S_SUP */
	if (stmf_state.stmf_alua_node == 1) {
		p[3] = 1;	/* Group 1 */
	} else {
		p[3] = 0;	/* Group 0 */
	}
	p[7] = nports & 0xff;
	p += 8;
	/* Relative target port ids of all active ports. */
	for (ilport = stmf_state.stmf_ilportlist; ilport;
	    ilport = ilport->ilport_next) {
		if (ilport->ilport_standby == 1) {
			continue;
		}
		((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
		p += 4;
	}
	if (nports_standby && ilu_alua) {
		p[0] = 0x02;	/* Non PREF, Standby */
		p[1] = 5;	/* AO_SUP, S_SUP */
		/* The standby group gets the other node's group number. */
		if (stmf_state.stmf_alua_node == 1) {
			p[3] = 0;	/* Group 0 */
		} else {
			p[3] = 1;	/* Group 1 */
		}
		p[7] = nports_standby & 0xff;
		p += 8;
		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = ilport->ilport_next) {
			if (ilport->ilport_standby == 0) {
				continue;
			}
			((uint16_t *)p)[1] = BE_16(ilport->ilport_rtpid);
			p += 4;
		}
	}

	mutex_exit(&stmf_state.stmf_lock);

	return (xd);
}
6027 5636
6028 5637 struct scsi_devid_desc *
6029 5638 stmf_scsilib_get_devid_desc(uint16_t rtpid)
6030 5639 {
6031 5640 scsi_devid_desc_t *devid = NULL;
6032 5641 stmf_i_local_port_t *ilport;
6033 5642
6034 5643 mutex_enter(&stmf_state.stmf_lock);
6035 5644
6036 5645 for (ilport = stmf_state.stmf_ilportlist; ilport;
6037 5646 ilport = ilport->ilport_next) {
6038 5647 if (ilport->ilport_rtpid == rtpid) {
6039 5648 scsi_devid_desc_t *id = ilport->ilport_lport->lport_id;
6040 5649 uint32_t id_sz = sizeof (scsi_devid_desc_t) +
6041 5650 id->ident_length;
6042 5651 devid = (scsi_devid_desc_t *)kmem_zalloc(id_sz,
6043 5652 KM_NOSLEEP);
6044 5653 if (devid != NULL) {
6045 5654 bcopy(id, devid, id_sz);
6046 5655 }
6047 5656 break;
6048 5657 }
6049 5658 }
6050 5659
6051 5660 mutex_exit(&stmf_state.stmf_lock);
6052 5661 return (devid);
6053 5662 }
6054 5663
6055 5664 uint16_t
6056 5665 stmf_scsilib_get_lport_rtid(struct scsi_devid_desc *devid)
6057 5666 {
6058 5667 stmf_i_local_port_t *ilport;
6059 5668 scsi_devid_desc_t *id;
6060 5669 uint16_t rtpid = 0;
6061 5670
6062 5671 mutex_enter(&stmf_state.stmf_lock);
6063 5672 for (ilport = stmf_state.stmf_ilportlist; ilport;
6064 5673 ilport = ilport->ilport_next) {
6065 5674 id = ilport->ilport_lport->lport_id;
6066 5675 if ((devid->ident_length == id->ident_length) &&
6067 5676 (memcmp(devid->ident, id->ident, id->ident_length) == 0)) {
6068 5677 rtpid = ilport->ilport_rtpid;
6069 5678 break;
6070 5679 }
6071 5680 }
6072 5681 mutex_exit(&stmf_state.stmf_lock);
6073 5682 return (rtpid);
6074 5683 }
6075 5684
/* Monotonic counter mixed into generated LU ids (id bytes 18-19). */
static uint16_t stmf_lu_id_gen_number = 0;
6077 5686
/*
 * Back-compat wrapper: generate a unique LU id with no explicit host id
 * (host_id 0 makes stmf_scsilib_uniq_lu_id2 derive one itself).
 */
stmf_status_t
stmf_scsilib_uniq_lu_id(uint32_t company_id, scsi_devid_desc_t *lu_id)
{
	return (stmf_scsilib_uniq_lu_id2(company_id, 0, lu_id));
}
6083 5692
|
↓ open down ↓ |
1496 lines elided |
↑ open up ↑ |
6084 5693 stmf_status_t
6085 5694 stmf_scsilib_uniq_lu_id2(uint32_t company_id, uint32_t host_id,
6086 5695 scsi_devid_desc_t *lu_id)
6087 5696 {
6088 5697 uint8_t *p;
6089 5698 struct timeval32 timestamp32;
6090 5699 uint32_t *t = (uint32_t *)×tamp32;
6091 5700 struct ether_addr mac;
6092 5701 uint8_t *e = (uint8_t *)&mac;
6093 5702 int hid = (int)host_id;
5703 + uint16_t gen_number;
6094 5704
6095 5705 if (company_id == COMPANY_ID_NONE)
6096 5706 company_id = COMPANY_ID_SUN;
6097 5707
6098 5708 if (lu_id->ident_length != 0x10)
6099 5709 return (STMF_INVALID_ARG);
6100 5710
6101 5711 p = (uint8_t *)lu_id;
6102 5712
6103 - atomic_add_16(&stmf_lu_id_gen_number, 1);
5713 + gen_number = atomic_add_16_nv(&stmf_lu_id_gen_number, 1);
6104 5714
6105 5715 p[0] = 0xf1; p[1] = 3; p[2] = 0; p[3] = 0x10;
6106 5716 p[4] = ((company_id >> 20) & 0xf) | 0x60;
6107 5717 p[5] = (company_id >> 12) & 0xff;
6108 5718 p[6] = (company_id >> 4) & 0xff;
6109 5719 p[7] = (company_id << 4) & 0xf0;
6110 5720 if (hid == 0 && !localetheraddr((struct ether_addr *)NULL, &mac)) {
6111 5721 hid = BE_32((int)zone_get_hostid(NULL));
6112 5722 }
6113 5723 if (hid != 0) {
6114 5724 e[0] = (hid >> 24) & 0xff;
6115 5725 e[1] = (hid >> 16) & 0xff;
6116 5726 e[2] = (hid >> 8) & 0xff;
6117 5727 e[3] = hid & 0xff;
6118 5728 e[4] = e[5] = 0;
6119 5729 }
6120 5730 bcopy(e, p+8, 6);
6121 5731 uniqtime32(×tamp32);
6122 5732 *t = BE_32(*t);
6123 5733 bcopy(t, p+14, 4);
6124 - p[18] = (stmf_lu_id_gen_number >> 8) & 0xff;
6125 - p[19] = stmf_lu_id_gen_number & 0xff;
5734 + p[18] = (gen_number >> 8) & 0xff;
5735 + p[19] = gen_number & 0xff;
6126 5736
6127 5737 return (STMF_SUCCESS);
6128 5738 }
6129 5739
/*
 * saa is sense key, ASC, ASCQ
 */
/*
 * Complete a task with SCSI status 'st'.  For CHECK CONDITION (status 2)
 * a fixed-format sense buffer is built from saa (sense key in bits
 * 16-19, ASC in bits 8-15, ASCQ in bits 0-7).
 *
 * NOTE(review): sd is a stack buffer; task_sense_data points at it only
 * for the duration of the stmf_send_scsi_status() call below —
 * presumably that call consumes/copies the sense data before returning.
 * TODO confirm against stmf_send_scsi_status().
 */
void
stmf_scsilib_send_status(scsi_task_t *task, uint8_t st, uint32_t saa)
{
	uint8_t sd[18];
	task->task_scsi_status = st;
	if (st == 2) {
		bzero(sd, 18);
		sd[0] = 0x70;	/* fixed-format, current error */
		sd[2] = (saa >> 16) & 0xf;
		sd[7] = 10;	/* additional sense length */
		sd[12] = (saa >> 8) & 0xff;
		sd[13] = saa & 0xff;
		task->task_sense_data = sd;
		task->task_sense_length = 18;
	} else {
		task->task_sense_data = NULL;
		task->task_sense_length = 0;
	}
	(void) stmf_send_scsi_status(task, STMF_IOF_LU_DONE);
}
6153 5763
/*
 * Build an INQUIRY VPD page 0x83 (device identification) into 'page'
 * (capacity page_len, byte0 goes into the peripheral qualifier/type
 * byte).  vpd_mask selects which descriptors to emit (LU id, target
 * port id, target port group, relative target port id).  The loop
 * copies each selected descriptor on the iteration AFTER it was chosen
 * (p/sz describe the previous selection), so the total length m counts
 * every descriptor even when page_len truncates the copy.  Returns the
 * number of bytes actually written (n).
 */
uint32_t
stmf_scsilib_prepare_vpd_page83(scsi_task_t *task, uint8_t *page,
    uint32_t page_len, uint8_t byte0, uint32_t vpd_mask)
{
	uint8_t *p = NULL;
	uint8_t small_buf[32];
	uint32_t sz = 0;
	uint32_t n = 4;		/* bytes written (page header first) */
	uint32_t m = 0;		/* full descriptor length for the header */
	uint32_t last_bit = 0;

	if (page_len < 4)
		return (0);
	if (page_len > 65535)
		page_len = 65535;

	page[0] = byte0;
	page[1] = 0x83;

	/* CONSTCOND */
	while (1) {
		/* Copy the descriptor selected on the previous pass. */
		m += sz;
		if (sz && (page_len > n)) {
			uint32_t copysz;
			copysz = page_len > (n + sz) ? sz : page_len - n;
			bcopy(p, page + n, copysz);
			n += copysz;
		}
		vpd_mask &= ~last_bit;
		if (vpd_mask == 0)
			break;

		if (vpd_mask & STMF_VPD_LU_ID) {
			last_bit = STMF_VPD_LU_ID;
			sz = task->task_lu->lu_id->ident_length + 4;
			p = (uint8_t *)task->task_lu->lu_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TARGET_ID) {
			last_bit = STMF_VPD_TARGET_ID;
			sz = task->task_lport->lport_id->ident_length + 4;
			p = (uint8_t *)task->task_lport->lport_id;
			continue;
		} else if (vpd_mask & STMF_VPD_TP_GROUP) {
			stmf_i_local_port_t *ilport;
			last_bit = STMF_VPD_TP_GROUP;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x15;	/* assoc: target port, type: group */
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			/*
			 * If we're in alua mode, group 1 contains all alua
			 * participating ports and all standby ports
			 * > 255. Otherwise, if we're in alua mode, any local
			 * ports (non standby/pppt) are also in group 1 if the
			 * alua node is 1. Otherwise the group is 0.
			 */
			if ((stmf_state.stmf_alua_state &&
			    (ilport->ilport_alua || ilport->ilport_standby) &&
			    ilport->ilport_rtpid > 255) ||
			    (stmf_state.stmf_alua_node == 1 &&
			    ilport->ilport_standby != 1)) {
				p[7] = 1;	/* Group 1 */
			}
			sz = 8;
			continue;
		} else if (vpd_mask & STMF_VPD_RELATIVE_TP_ID) {
			stmf_i_local_port_t *ilport;

			last_bit = STMF_VPD_RELATIVE_TP_ID;
			p = small_buf;
			bzero(p, 8);
			p[0] = 1;
			p[1] = 0x14;	/* assoc: target port, type: rtpid */
			p[3] = 4;
			ilport = (stmf_i_local_port_t *)
			    task->task_lport->lport_stmf_private;
			p[6] = (ilport->ilport_rtpid >> 8) & 0xff;
			p[7] = ilport->ilport_rtpid & 0xff;
			sz = 8;
			continue;
		} else {
			cmn_err(CE_WARN, "Invalid vpd_mask");
			break;
		}
	}

	/* Page length field: total descriptor bytes, not bytes copied. */
	page[2] = (m >> 8) & 0xff;
	page[3] = m & 0xff;

	return (n);
}
6248 5858
/*
 * Default handler for REPORT TARGET PORT GROUPS.  Parses the allocation
 * length from CDB bytes 6-9, builds the response via
 * stmf_prepare_tpgs_data() and starts the data-in transfer.  Aborts the
 * task on allocation failure.  'dbuf' may be a pre-allocated buffer from
 * the port provider; if NULL one is allocated here.
 */
void
stmf_scsilib_handle_report_tpgs(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;
	stmf_i_lu_t *ilu =
	    (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	stmf_xfer_data_t *xd;
	uint32_t sz, minsz;

	itask->itask_flags |= ITASK_DEFAULT_HANDLING;
	/* ALLOCATION LENGTH is CDB bytes 6-9 (big-endian). */
	task->task_cmd_xfer_length =
	    ((((uint32_t)task->task_cdb[6]) << 24) |
	    (((uint32_t)task->task_cdb[7]) << 16) |
	    (((uint32_t)task->task_cdb[8]) << 8) |
	    ((uint32_t)task->task_cdb[9]));

	if (task->task_additional_flags &
	    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
		task->task_expected_xfer_length =
		    task->task_cmd_xfer_length;
	}

	if (task->task_cmd_xfer_length == 0) {
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}
	if (task->task_cmd_xfer_length < 4) {
		/* Too small to even hold the 4-byte response header. */
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CDB);
		return;
	}

	sz = min(task->task_expected_xfer_length,
	    task->task_cmd_xfer_length);

	xd = stmf_prepare_tpgs_data(ilu->ilu_alua);

	if (xd == NULL) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return;
	}

	sz = min(sz, xd->size_left);
	xd->size_left = sz;
	minsz = min(512, sz);

	if (dbuf == NULL)
		dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
	if (dbuf == NULL) {
		kmem_free(xd, xd->alloc_size);
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return;
	}
	dbuf->db_lu_private = xd;
	stmf_xd_to_dbuf(dbuf, 1);

	dbuf->db_flags = DB_DIRECTION_TO_RPORT;
	(void) stmf_xfer_data(task, dbuf, 0);

}
6312 5922
/*
 * Default dispatcher for incoming SCSI task management functions.
 * Reset-style TMFs are handled at LU or target scope; anything else is
 * rejected with INVALID FIELD IN COMMAND IU.
 */
void
stmf_scsilib_handle_task_mgmt(scsi_task_t *task)
{

	switch (task->task_mgmt_function) {
	/*
	 * For now we will abort all I/Os on the LU in case of ABORT_TASK_SET
	 * and ABORT_TASK. But unlike LUN_RESET we will not reset LU state
	 * in these cases. This needs to be changed to abort only the required
	 * set.
	 */
	case TM_ABORT_TASK:
	case TM_ABORT_TASK_SET:
	case TM_CLEAR_TASK_SET:
	case TM_LUN_RESET:
		stmf_handle_lun_reset(task);
		/* issue the reset to the proxy node as well */
		if (stmf_state.stmf_alua_state == 1) {
			(void) stmf_proxy_scsi_cmd(task, NULL);
		}
		return;
	case TM_TARGET_RESET:
	case TM_TARGET_COLD_RESET:
	case TM_TARGET_WARM_RESET:
		stmf_handle_target_reset(task);
		return;
	default:
		/* We dont support this task mgmt function */
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_INVALID_FIELD_IN_CMD_IU);
		return;
	}
}
6346 5956
/*
 * Begin a LUN reset on the LU owning this task: mark the LU as having a
 * reset in progress, abort all other tasks queued to the LU, and start
 * polling this task until the aborts drain. If a reset is already active
 * on the LU the command is failed with "operation in progress".
 */
void
stmf_handle_lun_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with target reset, grab this lock. The LU is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
		/* Another reset already owns this LU; tell the initiator. */
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);

	/*
	 * Mark this task as the one causing LU reset so that we know who
	 * was responsible for setting the ILU_RESET_ACTIVE. In case this
	 * task itself gets aborted, we will clear ILU_RESET_ACTIVE.
	 */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING | ITASK_CAUSING_LU_RESET;

	/* Initiatiate abort on all commands on this LU except this one */
	stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED, task->task_lu);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		/* Could not arm the poll; abort the reset task itself. */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6389 5999
/*
 * Begin a target reset for the session owning this task. Runs in three
 * phases under stmf_lock + the session rwlock (writer): (1) verify no LU
 * visible in this session already has a reset active, (2) mark every LU
 * with ILU_RESET_ACTIVE, (3) queue aborts for all tasks on each LU, then
 * poll this task until everything drains.
 */
void
stmf_handle_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_task_t *itask;
	stmf_i_lu_t *ilu;
	stmf_i_scsi_session_t *iss;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	int i, lf;

	itask = (stmf_i_scsi_task_t *)task->task_stmf_private;
	iss = (stmf_i_scsi_session_t *)task->task_session->ss_stmf_private;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/*
	 * To sync with LUN reset, grab this lock. The session is not going
	 * anywhere as there is atleast one task pending (this task).
	 */
	mutex_enter(&stmf_state.stmf_lock);

	/* Grab the session lock as a writer to prevent any changes in it */
	rw_enter(iss->iss_lockp, RW_WRITER);

	if (iss->iss_flags & ISS_RESET_ACTIVE) {
		/* A target reset is already running on this session. */
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_CHECK,
		    STMF_SAA_OPERATION_IN_PROGRESS);
		return;
	}
	atomic_or_32(&iss->iss_flags, ISS_RESET_ACTIVE);

	/*
	 * Now go through each LUN in this session and make sure all of them
	 * can be reset.
	 */
	lm = iss->iss_sm;
	for (i = 0, lf = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lf++;	/* count of mapped LUNs actually present */
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/* Some LU is busy resetting; back out our claim. */
			atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
			rw_exit(iss->iss_lockp);
			mutex_exit(&stmf_state.stmf_lock);
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_OPERATION_IN_PROGRESS);
			return;
		}
	}
	if (lf == 0) {
		/* No luns in this session */
		atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
		rw_exit(iss->iss_lockp);
		mutex_exit(&stmf_state.stmf_lock);
		stmf_scsilib_send_status(task, STATUS_GOOD, 0);
		return;
	}

	/* ok, start the damage */
	itask->itask_flags |= ITASK_DEFAULT_HANDLING |
	    ITASK_CAUSING_TARGET_RESET;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)(lm_ent->ent_lu->lu_stmf_private);
		atomic_or_32(&ilu->ilu_flags, ILU_RESET_ACTIVE);
	}

	/* Queue aborts for every LU only after all LUs are marked. */
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		stmf_abort(STMF_QUEUE_ABORT_LU, task, STMF_ABORTED,
		    lm_ent->ent_lu);
	}

	rw_exit(iss->iss_lockp);
	mutex_exit(&stmf_state.stmf_lock);

	/* Start polling on this task */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task, STMF_ALLOC_FAILURE,
		    NULL);
		return;
	}
}
6481 6091
6482 6092 int
6483 6093 stmf_handle_cmd_during_ic(stmf_i_scsi_task_t *itask)
6484 6094 {
6485 6095 scsi_task_t *task = itask->itask_task;
6486 6096 stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
6487 6097 task->task_session->ss_stmf_private;
6488 6098
6489 6099 rw_enter(iss->iss_lockp, RW_WRITER);
6490 6100 if (((iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) == 0) ||
6491 6101 (task->task_cdb[0] == SCMD_INQUIRY)) {
6492 6102 rw_exit(iss->iss_lockp);
6493 6103 return (0);
6494 6104 }
6495 6105 atomic_and_32(&iss->iss_flags,
6496 6106 ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
6497 6107 rw_exit(iss->iss_lockp);
6498 6108
6499 6109 if (task->task_cdb[0] == SCMD_REPORT_LUNS) {
6500 6110 return (0);
6501 6111 }
6502 6112 stmf_scsilib_send_status(task, STATUS_CHECK,
6503 6113 STMF_SAA_REPORT_LUN_DATA_HAS_CHANGED);
6504 6114 return (1);
6505 6115 }
6506 6116
6507 6117 void
6508 6118 stmf_worker_init()
6509 6119 {
6510 6120 uint32_t i;
6511 6121
6512 6122 /* Make local copy of global tunables */
6513 6123 stmf_i_max_nworkers = stmf_max_nworkers;
6514 6124 stmf_i_min_nworkers = stmf_min_nworkers;
6515 6125
6516 6126 ASSERT(stmf_workers == NULL);
6517 6127 if (stmf_i_min_nworkers < 4) {
6518 6128 stmf_i_min_nworkers = 4;
6519 6129 }
6520 6130 if (stmf_i_max_nworkers < stmf_i_min_nworkers) {
6521 6131 stmf_i_max_nworkers = stmf_i_min_nworkers;
6522 6132 }
6523 6133 stmf_workers = (stmf_worker_t *)kmem_zalloc(
6524 6134 sizeof (stmf_worker_t) * stmf_i_max_nworkers, KM_SLEEP);
6525 6135 for (i = 0; i < stmf_i_max_nworkers; i++) {
6526 6136 stmf_worker_t *w = &stmf_workers[i];
6527 6137 mutex_init(&w->worker_lock, NULL, MUTEX_DRIVER, NULL);
6528 6138 cv_init(&w->worker_cv, NULL, CV_DRIVER, NULL);
6529 6139 }
6530 6140 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6531 6141 stmf_workers_state = STMF_WORKERS_ENABLED;
6532 6142
6533 6143 /* Workers will be started by stmf_worker_mgmt() */
6534 6144
6535 6145 /* Lets wait for atleast one worker to start */
6536 6146 while (stmf_nworkers_cur == 0)
6537 6147 delay(drv_usectohz(20 * 1000));
6538 6148 stmf_worker_mgmt_delay = drv_usectohz(3 * 1000 * 1000);
6539 6149 }
6540 6150
6541 6151 stmf_status_t
6542 6152 stmf_worker_fini()
6543 6153 {
6544 6154 int i;
6545 6155 clock_t sb;
6546 6156
6547 6157 if (stmf_workers_state == STMF_WORKERS_DISABLED)
6548 6158 return (STMF_SUCCESS);
6549 6159 ASSERT(stmf_workers);
6550 6160 stmf_workers_state = STMF_WORKERS_DISABLED;
6551 6161 stmf_worker_mgmt_delay = drv_usectohz(20 * 1000);
6552 6162 cv_signal(&stmf_state.stmf_cv);
6553 6163
6554 6164 sb = ddi_get_lbolt() + drv_usectohz(10 * 1000 * 1000);
6555 6165 /* Wait for all the threads to die */
6556 6166 while (stmf_nworkers_cur != 0) {
6557 6167 if (ddi_get_lbolt() > sb) {
6558 6168 stmf_workers_state = STMF_WORKERS_ENABLED;
6559 6169 return (STMF_BUSY);
6560 6170 }
6561 6171 delay(drv_usectohz(100 * 1000));
6562 6172 }
6563 6173 for (i = 0; i < stmf_i_max_nworkers; i++) {
6564 6174 stmf_worker_t *w = &stmf_workers[i];
6565 6175 mutex_destroy(&w->worker_lock);
6566 6176 cv_destroy(&w->worker_cv);
6567 6177 }
6568 6178 kmem_free(stmf_workers, sizeof (stmf_worker_t) * stmf_i_max_nworkers);
6569 6179 stmf_workers = NULL;
6570 6180
6571 6181 return (STMF_SUCCESS);
6572 6182 }
6573 6183
/*
 * Main loop for an STMF worker thread. Drains its worker's task queue,
 * dispatching each queued command (new task, xfer done, status done,
 * abort, poll) to the owning LU or local port. Tasks that cannot run yet
 * (pending aborts, polls whose timeout has not expired) are parked on the
 * wait queue and spliced back when the wait timer fires. Runs until the
 * worker is flagged STMF_WORKER_TERMINATE and its ref count drops to 0.
 */
void
stmf_worker_task(void *arg)
{
	stmf_worker_t *w;
	stmf_i_scsi_session_t *iss;
	scsi_task_t *task;
	stmf_i_scsi_task_t *itask;
	stmf_data_buf_t *dbuf;
	stmf_lu_t *lu;
	clock_t wait_timer = 0;
	clock_t wait_ticks, wait_delta = 0;
	uint32_t old, new;
	uint8_t curcmd;
	uint8_t abort_free;
	uint8_t wait_queue;
	uint8_t dec_qdepth;

	w = (stmf_worker_t *)arg;
	wait_ticks = drv_usectohz(10000);

	DTRACE_PROBE1(worker__create, stmf_worker_t, w);
	mutex_enter(&w->worker_lock);
	w->worker_flags |= STMF_WORKER_STARTED | STMF_WORKER_ACTIVE;
stmf_worker_loop:;
	/* Exit only when asked to terminate and nothing references us */
	if ((w->worker_ref_count == 0) &&
	    (w->worker_flags & STMF_WORKER_TERMINATE)) {
		w->worker_flags &= ~(STMF_WORKER_STARTED |
		    STMF_WORKER_ACTIVE | STMF_WORKER_TERMINATE);
		w->worker_tid = NULL;
		mutex_exit(&w->worker_lock);
		DTRACE_PROBE1(worker__destroy, stmf_worker_t, w);
		thread_exit();
	}
	/* CONSTCOND */
	while (1) {
		dec_qdepth = 0;
		/*
		 * Wait timer fired: move everything from the wait queue
		 * back onto the tail of the run queue for another look.
		 */
		if (wait_timer && (ddi_get_lbolt() >= wait_timer)) {
			wait_timer = 0;
			wait_delta = 0;
			if (w->worker_wait_head) {
				ASSERT(w->worker_wait_tail);
				if (w->worker_task_head == NULL)
					w->worker_task_head =
					    w->worker_wait_head;
				else
					w->worker_task_tail->itask_worker_next =
					    w->worker_wait_head;
				w->worker_task_tail = w->worker_wait_tail;
				w->worker_wait_head = w->worker_wait_tail =
				    NULL;
			}
		}
		if ((itask = w->worker_task_head) == NULL) {
			break;
		}
		task = itask->itask_task;
		DTRACE_PROBE2(worker__active, stmf_worker_t, w,
		    scsi_task_t *, task);
		/* Unlink the task from the head of the run queue */
		w->worker_task_head = itask->itask_worker_next;
		if (w->worker_task_head == NULL)
			w->worker_task_tail = NULL;

		wait_queue = 0;
		abort_free = 0;
		/* Top of the per-task command stack is what we run next */
		if (itask->itask_ncmds > 0) {
			curcmd = itask->itask_cmd_stack[itask->itask_ncmds - 1];
		} else {
			ASSERT(itask->itask_flags & ITASK_BEING_ABORTED);
		}
		/*
		 * Atomically update itask_flags; an abort request takes
		 * precedence over whatever command was stacked.
		 */
		do {
			old = itask->itask_flags;
			if (old & ITASK_BEING_ABORTED) {
				itask->itask_ncmds = 1;
				curcmd = itask->itask_cmd_stack[0] =
				    ITASK_CMD_ABORT;
				goto out_itask_flag_loop;
			} else if ((curcmd & ITASK_CMD_MASK) ==
			    ITASK_CMD_NEW_TASK) {
				/*
				 * set ITASK_KSTAT_IN_RUNQ, this flag
				 * will not reset until task completed
				 */
				new = old | ITASK_KNOWN_TO_LU |
				    ITASK_KSTAT_IN_RUNQ;
			} else {
				goto out_itask_flag_loop;
			}
		} while (atomic_cas_32(&itask->itask_flags, old, new) != old);

out_itask_flag_loop:

		/*
		 * Decide if this task needs to go to a queue and/or if
		 * we can decrement the itask_cmd_stack.
		 */
		if (curcmd == ITASK_CMD_ABORT) {
			if (itask->itask_flags & (ITASK_KNOWN_TO_LU |
			    ITASK_KNOWN_TO_TGT_PORT)) {
				wait_queue = 1;
			} else {
				abort_free = 1;
			}
		} else if ((curcmd & ITASK_CMD_POLL) &&
		    (itask->itask_poll_timeout > ddi_get_lbolt())) {
			wait_queue = 1;
		}

		if (wait_queue) {
			/* Park the task on the wait queue and arm the timer */
			itask->itask_worker_next = NULL;
			if (w->worker_wait_tail) {
				w->worker_wait_tail->itask_worker_next = itask;
			} else {
				w->worker_wait_head = itask;
			}
			w->worker_wait_tail = itask;
			if (wait_timer == 0) {
				wait_timer = ddi_get_lbolt() + wait_ticks;
				wait_delta = wait_ticks;
			}
		} else if ((--(itask->itask_ncmds)) != 0) {
			/* More stacked commands; requeue at the tail */
			itask->itask_worker_next = NULL;
			if (w->worker_task_tail) {
				w->worker_task_tail->itask_worker_next = itask;
			} else {
				w->worker_task_head = itask;
			}
			w->worker_task_tail = itask;
		} else {
			atomic_and_32(&itask->itask_flags,
			    ~ITASK_IN_WORKER_QUEUE);
			/*
			 * This is where the queue depth should go down by
			 * one but we delay that on purpose to account for
			 * the call into the provider. The actual decrement
			 * happens after the worker has done its job.
			 */
			dec_qdepth = 1;
			itask->itask_waitq_time +=
			    gethrtime() - itask->itask_waitq_enter_timestamp;
		}

		/* We made it here means we are going to call LU */
		if ((itask->itask_flags & ITASK_DEFAULT_HANDLING) == 0)
			lu = task->task_lu;
		else
			lu = dlun0;
		dbuf = itask->itask_dbufs[ITASK_CMD_BUF_NDX(curcmd)];
		/* Drop the lock across the provider call-out */
		mutex_exit(&w->worker_lock);
		curcmd &= ITASK_CMD_MASK;
		stmf_task_audit(itask, TE_PROCESS_CMD, curcmd, dbuf);
		switch (curcmd) {
		case ITASK_CMD_NEW_TASK:
			iss = (stmf_i_scsi_session_t *)
			    task->task_session->ss_stmf_private;
			stmf_itl_lu_new_task(itask);
			if (iss->iss_flags & ISS_LUN_INVENTORY_CHANGED) {
				if (stmf_handle_cmd_during_ic(itask))
					break;
			}
#ifdef	DEBUG
			if (stmf_drop_task_counter > 0) {
				if (atomic_add_32_nv(
				    (uint32_t *)&stmf_drop_task_counter,
				    -1) == 1) {
					break;
				}
			}
#endif
			DTRACE_PROBE1(scsi__task__start, scsi_task_t *, task);
			lu->lu_new_task(task, dbuf);
			break;
		case ITASK_CMD_DATA_XFER_DONE:
			lu->lu_dbuf_xfer_done(task, dbuf);
			break;
		case ITASK_CMD_STATUS_DONE:
			lu->lu_send_status_done(task);
			break;
		case ITASK_CMD_ABORT:
			if (abort_free) {
				stmf_task_free(task);
			} else {
				stmf_do_task_abort(task);
			}
			break;
		case ITASK_CMD_POLL_LU:
			if (!wait_queue) {
				lu->lu_task_poll(task);
			}
			break;
		case ITASK_CMD_POLL_LPORT:
			if (!wait_queue)
				task->task_lport->lport_task_poll(task);
			break;
		case ITASK_CMD_SEND_STATUS:
		/* case ITASK_CMD_XFER_DATA: */
			break;
		}
		mutex_enter(&w->worker_lock);
		if (dec_qdepth) {
			w->worker_queue_depth--;
		}
	}
	/*
	 * Queue drained. If terminating, either exit now (no refs) or
	 * arm a one-tick timer so we recheck shortly.
	 */
	if ((w->worker_flags & STMF_WORKER_TERMINATE) && (wait_timer == 0)) {
		if (w->worker_ref_count == 0)
			goto stmf_worker_loop;
		else {
			wait_timer = ddi_get_lbolt() + 1;
			wait_delta = 1;
		}
	}
	w->worker_flags &= ~STMF_WORKER_ACTIVE;
	if (wait_timer) {
		DTRACE_PROBE1(worker__timed__sleep, stmf_worker_t, w);
		(void) cv_reltimedwait(&w->worker_cv, &w->worker_lock,
		    wait_delta, TR_CLOCK_TICK);
	} else {
		DTRACE_PROBE1(worker__sleep, stmf_worker_t, w);
		cv_wait(&w->worker_cv, &w->worker_lock);
	}
	DTRACE_PROBE1(worker__wakeup, stmf_worker_t, w);
	w->worker_flags |= STMF_WORKER_ACTIVE;
	goto stmf_worker_loop;
}
6797 6407
/*
 * Periodic worker-pool manager. Tracks in-flight scale-up/scale-down
 * transitions, and sizes the pool from the observed per-interval queue
 * depth: growth is immediate, shrinking is deferred by
 * stmf_worker_scale_down_delay while the peak load is monitored.
 */
void
stmf_worker_mgmt()
{
	int i;
	int workers_needed;
	uint32_t qd;
	clock_t tps, d = 0;
	uint32_t cur_max_ntasks = 0;
	stmf_worker_t *w;

	/* Check if we are trying to increase the # of threads */
	for (i = stmf_nworkers_cur; i < stmf_nworkers_needed; i++) {
		if (stmf_workers[i].worker_flags & STMF_WORKER_STARTED) {
			stmf_nworkers_cur++;
			stmf_nworkers_accepting_cmds++;
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are trying to decrease the # of workers */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		if ((stmf_workers[i].worker_flags & STMF_WORKER_STARTED) == 0) {
			stmf_nworkers_cur--;
			/*
			 * stmf_nworkers_accepting_cmds has already been
			 * updated by the request to reduce the # of workers.
			 */
		} else {
			/* Wait for transition to complete */
			return;
		}
	}
	/* Check if we are being asked to quit */
	if (stmf_workers_state != STMF_WORKERS_ENABLED) {
		if (stmf_nworkers_cur) {
			workers_needed = 0;
			goto worker_mgmt_trigger_change;
		}
		return;
	}
	/* Check if we are starting */
	if (stmf_nworkers_cur < stmf_i_min_nworkers) {
		workers_needed = stmf_i_min_nworkers;
		goto worker_mgmt_trigger_change;
	}

	/*
	 * Once at least a second has elapsed, collect and reset the
	 * per-worker peak queue depths for this interval.
	 */
	tps = drv_usectohz(1 * 1000 * 1000);
	if ((stmf_wm_last != 0) &&
	    ((d = ddi_get_lbolt() - stmf_wm_last) > tps)) {
		qd = 0;
		for (i = 0; i < stmf_nworkers_accepting_cmds; i++) {
			qd += stmf_workers[i].worker_max_qdepth_pu;
			stmf_workers[i].worker_max_qdepth_pu = 0;
			if (stmf_workers[i].worker_max_sys_qdepth_pu >
			    cur_max_ntasks) {
				cur_max_ntasks =
				    stmf_workers[i].worker_max_sys_qdepth_pu;
			}
			stmf_workers[i].worker_max_sys_qdepth_pu = 0;
		}
	}
	stmf_wm_last = ddi_get_lbolt();
	if (d <= tps) {
		/* still ramping up */
		return;
	}
	/* max qdepth cannot be more than max tasks */
	if (qd > cur_max_ntasks)
		qd = cur_max_ntasks;

	/* See if we have more workers */
	if (qd < stmf_nworkers_accepting_cmds) {
		/*
		 * Since we dont reduce the worker count right away, monitor
		 * the highest load during the scale_down_delay.
		 */
		if (qd > stmf_worker_scale_down_qd)
			stmf_worker_scale_down_qd = qd;
		if (stmf_worker_scale_down_timer == 0) {
			/* First under-loaded interval: start the grace timer */
			stmf_worker_scale_down_timer = ddi_get_lbolt() +
			    drv_usectohz(stmf_worker_scale_down_delay *
			    1000 * 1000);
			return;
		}
		if (ddi_get_lbolt() < stmf_worker_scale_down_timer) {
			return;
		}
		/* Its time to reduce the workers */
		if (stmf_worker_scale_down_qd < stmf_i_min_nworkers)
			stmf_worker_scale_down_qd = stmf_i_min_nworkers;
		if (stmf_worker_scale_down_qd > stmf_i_max_nworkers)
			stmf_worker_scale_down_qd = stmf_i_max_nworkers;
		if (stmf_worker_scale_down_qd == stmf_nworkers_cur)
			return;
		workers_needed = stmf_worker_scale_down_qd;
		stmf_worker_scale_down_qd = 0;
		goto worker_mgmt_trigger_change;
	}
	/* Load is at or above capacity: cancel any pending scale-down */
	stmf_worker_scale_down_qd = 0;
	stmf_worker_scale_down_timer = 0;
	if (qd > stmf_i_max_nworkers)
		qd = stmf_i_max_nworkers;
	if (qd < stmf_i_min_nworkers)
		qd = stmf_i_min_nworkers;
	if (qd == stmf_nworkers_cur)
		return;
	workers_needed = qd;
	goto worker_mgmt_trigger_change;

	/* NOTREACHED */
	return;

worker_mgmt_trigger_change:
	ASSERT(workers_needed != stmf_nworkers_cur);
	if (workers_needed > stmf_nworkers_cur) {
		stmf_nworkers_needed = workers_needed;
		for (i = stmf_nworkers_cur; i < workers_needed; i++) {
			w = &stmf_workers[i];
			w->worker_tid = thread_create(NULL, 0, stmf_worker_task,
			    (void *)&stmf_workers[i], 0, &p0, TS_RUN,
			    minclsyspri);
		}
		return;
	}
	/* At this point we know that we are decreasing the # of workers */
	stmf_nworkers_accepting_cmds = workers_needed;
	stmf_nworkers_needed = workers_needed;
	/* Signal the workers that its time to quit */
	for (i = (stmf_nworkers_cur - 1); i >= stmf_nworkers_needed; i--) {
		w = &stmf_workers[i];
		ASSERT(w && (w->worker_flags & STMF_WORKER_STARTED));
		mutex_enter(&w->worker_lock);
		w->worker_flags |= STMF_WORKER_TERMINATE;
		if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0)
			cv_signal(&w->worker_cv);
		mutex_exit(&w->worker_lock);
	}
}
6937 6547
6938 6548 /*
6939 6549 * Fills out a dbuf from stmf_xfer_data_t (contained in the db_lu_private).
6940 6550 * If all the data has been filled out, frees the xd and makes
6941 6551 * db_lu_private NULL.
6942 6552 */
6943 6553 void
6944 6554 stmf_xd_to_dbuf(stmf_data_buf_t *dbuf, int set_rel_off)
6945 6555 {
6946 6556 stmf_xfer_data_t *xd;
6947 6557 uint8_t *p;
6948 6558 int i;
6949 6559 uint32_t s;
6950 6560
6951 6561 xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
6952 6562 dbuf->db_data_size = 0;
6953 6563 if (set_rel_off)
6954 6564 dbuf->db_relative_offset = xd->size_done;
6955 6565 for (i = 0; i < dbuf->db_sglist_length; i++) {
6956 6566 s = min(xd->size_left, dbuf->db_sglist[i].seg_length);
6957 6567 p = &xd->buf[xd->size_done];
6958 6568 bcopy(p, dbuf->db_sglist[i].seg_addr, s);
6959 6569 xd->size_left -= s;
6960 6570 xd->size_done += s;
6961 6571 dbuf->db_data_size += s;
6962 6572 if (xd->size_left == 0) {
6963 6573 kmem_free(xd, xd->alloc_size);
6964 6574 dbuf->db_lu_private = NULL;
6965 6575 return;
6966 6576 }
6967 6577 }
6968 6578 }
6969 6579
6970 6580 /* ARGSUSED */
6971 6581 stmf_status_t
6972 6582 stmf_dlun0_task_alloc(scsi_task_t *task)
6973 6583 {
6974 6584 return (STMF_SUCCESS);
6975 6585 }
6976 6586
/*
 * Entry point for new tasks routed to the default LU (dlun0). Handles
 * task management functions, standard INQUIRY and REPORT LUNS itself;
 * everything else is failed with "invalid opcode".
 */
void
stmf_dlun0_new_task(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	uint8_t *cdbp = (uint8_t *)&task->task_cdb[0];
	stmf_i_scsi_session_t *iss;
	uint32_t sz, minsz;
	uint8_t *p;
	stmf_xfer_data_t *xd;
	uint8_t inq_page_length = 31;	/* standard INQUIRY additional length */

	if (task->task_mgmt_function) {
		stmf_scsilib_handle_task_mgmt(task);
		return;
	}

	switch (cdbp[0]) {
	case SCMD_INQUIRY:
		/*
		 * Basic protocol checks. In addition, only reply to
		 * standard inquiry. Otherwise, the LU provider needs
		 * to respond.
		 */

		/* Reject EVPD/page-code/control-byte variants */
		if (cdbp[2] || (cdbp[1] & 1) || cdbp[5]) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		/* Allocation length from CDB bytes 3-4 (big endian) */
		task->task_cmd_xfer_length =
		    (((uint32_t)cdbp[3]) << 8) | cdbp[4];

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    min(36, task->task_cmd_xfer_length));
		minsz = 36;

		if (sz == 0) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return;
		}

		if (dbuf && (dbuf->db_sglist[0].seg_length < 36)) {
			/*
			 * Ignore any preallocated dbuf if the size is less
			 * than 36. It will be freed during the task_free.
			 */
			dbuf = NULL;
		}
		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, minsz, &minsz, 0);
		if ((dbuf == NULL) || (dbuf->db_sglist[0].seg_length < sz)) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = NULL;

		p = dbuf->db_sglist[0].seg_addr;

		/*
		 * Standard inquiry handling only.
		 */

		bzero(p, inq_page_length + 5);

		/* Fixed fields of the standard INQUIRY response */
		p[0] = DPQ_SUPPORTED | DTYPE_UNKNOWN;
		p[2] = 5;
		p[3] = 0x12;
		p[4] = inq_page_length;
		p[6] = 0x80;

		/* Vendor (8), product (16), revision (4) identification */
		(void) strncpy((char *)p+8, "SUN     ", 8);
		(void) strncpy((char *)p+16, "COMSTAR	       ", 16);
		(void) strncpy((char *)p+32, "1.0 ", 4);

		dbuf->db_data_size = sz;
		dbuf->db_relative_offset = 0;
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);

		return;

	case SCMD_REPORT_LUNS:
		/* Allocation length from CDB bytes 6-9 (big endian) */
		task->task_cmd_xfer_length =
		    ((((uint32_t)task->task_cdb[6]) << 24) |
		    (((uint32_t)task->task_cdb[7]) << 16) |
		    (((uint32_t)task->task_cdb[8]) << 8) |
		    ((uint32_t)task->task_cdb[9]));

		if (task->task_additional_flags &
		    TASK_AF_NO_EXPECTED_XFER_LENGTH) {
			task->task_expected_xfer_length =
			    task->task_cmd_xfer_length;
		}

		sz = min(task->task_expected_xfer_length,
		    task->task_cmd_xfer_length);

		if (sz < 16) {
			stmf_scsilib_send_status(task, STATUS_CHECK,
			    STMF_SAA_INVALID_FIELD_IN_CDB);
			return;
		}

		iss = (stmf_i_scsi_session_t *)
		    task->task_session->ss_stmf_private;
		rw_enter(iss->iss_lockp, RW_WRITER);
		xd = stmf_session_prepare_report_lun_data(iss->iss_sm);
		rw_exit(iss->iss_lockp);

		if (xd == NULL) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}

		sz = min(sz, xd->size_left);
		xd->size_left = sz;
		minsz = min(512, sz);

		if (dbuf == NULL)
			dbuf = stmf_alloc_dbuf(task, sz, &minsz, 0);
		if (dbuf == NULL) {
			kmem_free(xd, xd->alloc_size);
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		dbuf->db_lu_private = xd;
		stmf_xd_to_dbuf(dbuf, 1);

		/* Reporting LUNs satisfies any pending inventory change */
		atomic_and_32(&iss->iss_flags,
		    ~(ISS_LUN_INVENTORY_CHANGED | ISS_GOT_INITIAL_LUNS));
		dbuf->db_flags = DB_DIRECTION_TO_RPORT;
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_scsilib_send_status(task, STATUS_CHECK, STMF_SAA_INVALID_OPCODE);
}
7123 6733
/*
 * Data transfer completion for dlun0. Continues a multi-dbuf transfer
 * if staged data remains, otherwise frees the dbuf and sends final
 * status (or, for proxied tasks, notifies pppt and lets it complete
 * the task).
 */
void
stmf_dlun0_dbuf_done(scsi_task_t *task, stmf_data_buf_t *dbuf)
{
	stmf_i_scsi_task_t *itask =
	    (stmf_i_scsi_task_t *)task->task_stmf_private;

	if (dbuf->db_xfer_status != STMF_SUCCESS) {
		/* Transfer failed; propagate the failure as an abort */
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    dbuf->db_xfer_status, NULL);
		return;
	}
	task->task_nbytes_transferred += dbuf->db_data_size;
	if (dbuf->db_lu_private) {
		/* There is more */
		stmf_xd_to_dbuf(dbuf, 1);
		(void) stmf_xfer_data(task, dbuf, 0);
		return;
	}

	stmf_free_dbuf(task, dbuf);
	/*
	 * If this is a proxy task, it will need to be completed from the
	 * proxy port provider. This message lets pppt know that the xfer
	 * is complete. When we receive the status from pppt, we will
	 * then relay that status back to the lport.
	 */
	if (itask->itask_flags & ITASK_PROXY_TASK) {
		stmf_ic_msg_t *ic_xfer_done_msg = NULL;
		stmf_status_t ic_ret = STMF_FAILURE;
		uint64_t session_msg_id;
		mutex_enter(&stmf_state.stmf_lock);
		session_msg_id = stmf_proxy_msg_id++;
		mutex_exit(&stmf_state.stmf_lock);
		/* send xfer done status to pppt */
		ic_xfer_done_msg = ic_scsi_data_xfer_done_msg_alloc(
		    itask->itask_proxy_msg_id,
		    task->task_session->ss_session_id,
		    STMF_SUCCESS, session_msg_id);
		if (ic_xfer_done_msg) {
			ic_ret = ic_tx_msg(ic_xfer_done_msg);
			if (ic_ret != STMF_IC_MSG_SUCCESS) {
				cmn_err(CE_WARN, "unable to xmit session msg");
			}
		}
		/* task will be completed from pppt */
		return;
	}
	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7173 6783
7174 6784 /* ARGSUSED */
7175 6785 void
7176 6786 stmf_dlun0_status_done(scsi_task_t *task)
7177 6787 {
7178 6788 }
7179 6789
7180 6790 /* ARGSUSED */
7181 6791 void
7182 6792 stmf_dlun0_task_free(scsi_task_t *task)
7183 6793 {
7184 6794 }
7185 6795
7186 6796 /* ARGSUSED */
7187 6797 stmf_status_t
7188 6798 stmf_dlun0_abort(struct stmf_lu *lu, int abort_cmd, void *arg, uint32_t flags)
7189 6799 {
7190 6800 scsi_task_t *task = (scsi_task_t *)arg;
7191 6801 stmf_i_scsi_task_t *itask =
7192 6802 (stmf_i_scsi_task_t *)task->task_stmf_private;
7193 6803 stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7194 6804 int i;
7195 6805 uint8_t map;
7196 6806
7197 6807 if ((task->task_mgmt_function) && (itask->itask_flags &
7198 6808 (ITASK_CAUSING_LU_RESET | ITASK_CAUSING_TARGET_RESET))) {
7199 6809 switch (task->task_mgmt_function) {
7200 6810 case TM_ABORT_TASK:
7201 6811 case TM_ABORT_TASK_SET:
7202 6812 case TM_CLEAR_TASK_SET:
7203 6813 case TM_LUN_RESET:
7204 6814 atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
7205 6815 break;
7206 6816 case TM_TARGET_RESET:
7207 6817 case TM_TARGET_COLD_RESET:
7208 6818 case TM_TARGET_WARM_RESET:
7209 6819 stmf_abort_target_reset(task);
7210 6820 break;
7211 6821 }
7212 6822 return (STMF_ABORT_SUCCESS);
7213 6823 }
7214 6824
7215 6825 /*
7216 6826 * OK so its not a task mgmt. Make sure we free any xd sitting
7217 6827 * inside any dbuf.
7218 6828 */
7219 6829 if ((map = itask->itask_allocated_buf_map) != 0) {
7220 6830 for (i = 0; i < 4; i++) {
7221 6831 if ((map & 1) &&
7222 6832 ((itask->itask_dbufs[i])->db_lu_private)) {
7223 6833 stmf_xfer_data_t *xd;
7224 6834 stmf_data_buf_t *dbuf;
7225 6835
7226 6836 dbuf = itask->itask_dbufs[i];
7227 6837 xd = (stmf_xfer_data_t *)dbuf->db_lu_private;
7228 6838 dbuf->db_lu_private = NULL;
7229 6839 kmem_free(xd, xd->alloc_size);
7230 6840 }
7231 6841 map >>= 1;
7232 6842 }
7233 6843 }
7234 6844 return (STMF_ABORT_SUCCESS);
7235 6845 }
7236 6846
7237 6847 void
7238 6848 stmf_dlun0_task_poll(struct scsi_task *task)
7239 6849 {
7240 6850 /* Right now we only do this for handling task management functions */
7241 6851 ASSERT(task->task_mgmt_function);
7242 6852
7243 6853 switch (task->task_mgmt_function) {
7244 6854 case TM_ABORT_TASK:
7245 6855 case TM_ABORT_TASK_SET:
7246 6856 case TM_CLEAR_TASK_SET:
7247 6857 case TM_LUN_RESET:
7248 6858 (void) stmf_lun_reset_poll(task->task_lu, task, 0);
7249 6859 return;
7250 6860 case TM_TARGET_RESET:
7251 6861 case TM_TARGET_COLD_RESET:
7252 6862 case TM_TARGET_WARM_RESET:
7253 6863 stmf_target_reset_poll(task);
7254 6864 return;
7255 6865 }
7256 6866 }
7257 6867
/* ARGSUSED */
void
stmf_dlun0_ctl(struct stmf_lu *lu, int cmd, void *arg)
{
	/*
	 * dlun0 is an internal pseudo-LU that is never subject to
	 * online/offline control; log a warning if this is ever reached.
	 */
	cmn_err(CE_WARN, "stmf_dlun0_ctl called with cmd %x", cmd);
}
7265 6875
7266 6876 void
7267 6877 stmf_dlun_init()
7268 6878 {
7269 6879 stmf_i_lu_t *ilu;
7270 6880
7271 6881 dlun0 = stmf_alloc(STMF_STRUCT_STMF_LU, 0, 0);
7272 6882 dlun0->lu_task_alloc = stmf_dlun0_task_alloc;
7273 6883 dlun0->lu_new_task = stmf_dlun0_new_task;
7274 6884 dlun0->lu_dbuf_xfer_done = stmf_dlun0_dbuf_done;
7275 6885 dlun0->lu_send_status_done = stmf_dlun0_status_done;
7276 6886 dlun0->lu_task_free = stmf_dlun0_task_free;
7277 6887 dlun0->lu_abort = stmf_dlun0_abort;
7278 6888 dlun0->lu_task_poll = stmf_dlun0_task_poll;
7279 6889 dlun0->lu_ctl = stmf_dlun0_ctl;
7280 6890
7281 6891 ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;
7282 6892 ilu->ilu_cur_task_cntr = &ilu->ilu_task_cntr1;
7283 6893 }
7284 6894
/*
 * Tear down dlun0. All of its tasks must already be on the free list;
 * each cached task is released before the LU itself is freed.
 */
stmf_status_t
stmf_dlun_fini()
{
	stmf_i_lu_t *ilu;

	ilu = (stmf_i_lu_t *)dlun0->lu_stmf_private;

	/* Nothing may still be in flight at fini time. */
	ASSERT(ilu->ilu_ntasks == ilu->ilu_ntasks_free);
	if (ilu->ilu_ntasks) {
		stmf_i_scsi_task_t *itask, *nitask;

		/* Walk the LU task list and free every cached task. */
		nitask = ilu->ilu_tasks;
		do {
			itask = nitask;
			nitask = itask->itask_lu_next;
			dlun0->lu_task_free(itask->itask_task);
			stmf_free(itask->itask_task);
		} while (nitask != NULL);

	}
	stmf_free(dlun0);
	return (STMF_SUCCESS);
}
7308 6918
/*
 * Abort an in-progress target reset: clear the reset-active flag on
 * every LU mapped into the task's session, then on the session itself.
 */
void
stmf_abort_target_reset(scsi_task_t *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	int i;

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;	/* unused LUN slot */
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
	}
	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);
	rw_exit(iss->iss_lockp);
}
7333 6943
/*
 * The return value is only used by function managing target reset.
 *
 * Polls an LU for reset completion. When all of the LU's tasks are
 * done (or only this TM task remains), the LU's reset entry point is
 * invoked. If target_reset is nonzero the caller handles status and
 * rescheduling; otherwise this function sends status / aborts / polls
 * again on its own.
 */
stmf_status_t
stmf_lun_reset_poll(stmf_lu_t *lu, struct scsi_task *task, int target_reset)
{
	stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
	int ntasks_pending;

	ntasks_pending = ilu->ilu_ntasks - ilu->ilu_ntasks_free;
	/*
	 * This function is also used during Target reset. The idea is that
	 * once all the commands are aborted, call the LU's reset entry
	 * point (abort entry point with a reset flag). But if this Task
	 * mgmt is running on this LU then all the tasks cannot be aborted.
	 * one task (this task) will still be running which is OK.
	 */
	if ((ntasks_pending == 0) || ((task->task_lu == lu) &&
	    (ntasks_pending == 1))) {
		stmf_status_t ret;

		if ((task->task_mgmt_function == TM_LUN_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_WARM_RESET) ||
		    (task->task_mgmt_function == TM_TARGET_COLD_RESET)) {
			/* Ask the LU itself to carry out the reset. */
			ret = lu->lu_abort(lu, STMF_LU_RESET_STATE, task, 0);
		} else {
			ret = STMF_SUCCESS;
		}
		if (ret == STMF_SUCCESS) {
			atomic_and_32(&ilu->ilu_flags, ~ILU_RESET_ACTIVE);
		}
		if (target_reset) {
			/* Target reset caller deals with status itself. */
			return (ret);
		}
		if (ret == STMF_SUCCESS) {
			stmf_scsilib_send_status(task, STATUS_GOOD, 0);
			return (ret);
		}
		if (ret != STMF_BUSY) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task, ret, NULL);
			return (ret);
		}
	}

	if (target_reset) {
		/* Tell target reset polling code that we are not done */
		return (STMF_BUSY);
	}

	/* Still busy: arrange for this task to be polled again later. */
	if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
	    != STMF_SUCCESS) {
		stmf_abort(STMF_QUEUE_TASK_ABORT, task,
		    STMF_ALLOC_FAILURE, NULL);
		return (STMF_SUCCESS);
	}

	return (STMF_SUCCESS);
}
7393 7003
/*
 * Polls every reset-active LU in the session for target-reset
 * completion. When all LUs have finished, clears the session's
 * reset-active flag and sends GOOD status; otherwise schedules
 * another poll.
 */
void
stmf_target_reset_poll(struct scsi_task *task)
{
	stmf_i_scsi_session_t *iss = (stmf_i_scsi_session_t *)
	    task->task_session->ss_stmf_private;
	stmf_lun_map_t *lm;
	stmf_lun_map_ent_t *lm_ent;
	stmf_i_lu_t *ilu;
	stmf_status_t ret;
	int i;
	int not_done = 0;

	ASSERT(iss->iss_flags & ISS_RESET_ACTIVE);

	rw_enter(iss->iss_lockp, RW_READER);
	lm = iss->iss_sm;
	for (i = 0; i < lm->lm_nentries; i++) {
		if (lm->lm_plus[i] == NULL)
			continue;
		lm_ent = (stmf_lun_map_ent_t *)lm->lm_plus[i];
		ilu = (stmf_i_lu_t *)lm_ent->ent_lu->lu_stmf_private;
		if (ilu->ilu_flags & ILU_RESET_ACTIVE) {
			/*
			 * Drop the session lock across the per-LU poll;
			 * it may block or send status. Reacquired below.
			 */
			rw_exit(iss->iss_lockp);
			ret = stmf_lun_reset_poll(lm_ent->ent_lu, task, 1);
			rw_enter(iss->iss_lockp, RW_READER);
			if (ret == STMF_SUCCESS)
				continue;
			not_done = 1;
			if (ret != STMF_BUSY) {
				/* Hard failure: abort the TM task. */
				rw_exit(iss->iss_lockp);
				stmf_abort(STMF_QUEUE_TASK_ABORT, task,
				    STMF_ABORTED, NULL);
				return;
			}
		}
	}
	rw_exit(iss->iss_lockp);

	if (not_done) {
		/* Some LUs still busy: poll again later. */
		if (stmf_task_poll_lu(task, ITASK_DEFAULT_POLL_TIMEOUT)
		    != STMF_SUCCESS) {
			stmf_abort(STMF_QUEUE_TASK_ABORT, task,
			    STMF_ALLOC_FAILURE, NULL);
			return;
		}
		return;
	}

	atomic_and_32(&iss->iss_flags, ~ISS_RESET_ACTIVE);

	stmf_scsilib_send_status(task, STATUS_GOOD, 0);
}
7446 7056
7447 7057 stmf_status_t
7448 7058 stmf_lu_add_event(stmf_lu_t *lu, int eventid)
7449 7059 {
7450 7060 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7451 7061
7452 7062 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7453 7063 return (STMF_INVALID_ARG);
7454 7064 }
7455 7065
7456 7066 STMF_EVENT_ADD(ilu->ilu_event_hdl, eventid);
7457 7067 return (STMF_SUCCESS);
7458 7068 }
7459 7069
7460 7070 stmf_status_t
7461 7071 stmf_lu_remove_event(stmf_lu_t *lu, int eventid)
7462 7072 {
7463 7073 stmf_i_lu_t *ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
7464 7074
7465 7075 if (eventid == STMF_EVENT_ALL) {
7466 7076 STMF_EVENT_CLEAR_ALL(ilu->ilu_event_hdl);
7467 7077 return (STMF_SUCCESS);
7468 7078 }
7469 7079
7470 7080 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7471 7081 return (STMF_INVALID_ARG);
7472 7082 }
7473 7083
7474 7084 STMF_EVENT_REMOVE(ilu->ilu_event_hdl, eventid);
7475 7085 return (STMF_SUCCESS);
7476 7086 }
7477 7087
7478 7088 stmf_status_t
7479 7089 stmf_lport_add_event(stmf_local_port_t *lport, int eventid)
7480 7090 {
7481 7091 stmf_i_local_port_t *ilport =
7482 7092 (stmf_i_local_port_t *)lport->lport_stmf_private;
7483 7093
7484 7094 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7485 7095 return (STMF_INVALID_ARG);
7486 7096 }
7487 7097
7488 7098 STMF_EVENT_ADD(ilport->ilport_event_hdl, eventid);
7489 7099 return (STMF_SUCCESS);
7490 7100 }
7491 7101
7492 7102 stmf_status_t
7493 7103 stmf_lport_remove_event(stmf_local_port_t *lport, int eventid)
7494 7104 {
7495 7105 stmf_i_local_port_t *ilport =
7496 7106 (stmf_i_local_port_t *)lport->lport_stmf_private;
7497 7107
7498 7108 if (eventid == STMF_EVENT_ALL) {
7499 7109 STMF_EVENT_CLEAR_ALL(ilport->ilport_event_hdl);
7500 7110 return (STMF_SUCCESS);
7501 7111 }
7502 7112
7503 7113 if ((eventid < 0) || (eventid >= STMF_MAX_NUM_EVENTS)) {
7504 7114 return (STMF_INVALID_ARG);
7505 7115 }
7506 7116
7507 7117 STMF_EVENT_REMOVE(ilport->ilport_event_hdl, eventid);
7508 7118 return (STMF_SUCCESS);
7509 7119 }
7510 7120
7511 7121 void
7512 7122 stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid, void *arg, uint32_t flags)
7513 7123 {
7514 7124 if (STMF_EVENT_ENABLED(ilu->ilu_event_hdl, eventid) &&
7515 7125 (ilu->ilu_lu->lu_event_handler != NULL)) {
7516 7126 ilu->ilu_lu->lu_event_handler(ilu->ilu_lu, eventid, arg, flags);
7517 7127 }
7518 7128 }
7519 7129
7520 7130 void
7521 7131 stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid, void *arg,
7522 7132 uint32_t flags)
7523 7133 {
7524 7134 if (STMF_EVENT_ENABLED(ilport->ilport_event_hdl, eventid) &&
7525 7135 (ilport->ilport_lport->lport_event_handler != NULL)) {
7526 7136 ilport->ilport_lport->lport_event_handler(
7527 7137 ilport->ilport_lport, eventid, arg, flags);
7528 7138 }
7529 7139 }
7530 7140
/*
 * With the possibility of having multiple itl sessions pointing to the
 * same LU, the ilu_kstat_io ks_lock mutex is used to synchronize
 * updates of the ilu_kstat_io statistics.
 */
|
↓ open down ↓ |
1401 lines elided |
↑ open up ↑ |
/*
 * Record the start of a task for kstat accounting: timestamp the task
 * and move it onto the LU and lport wait queues.
 */
void
stmf_itl_task_start(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;

	/* No ITL nexus data, or internal dlun0 task: nothing to account. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	itask->itask_start_timestamp = gethrtime();
	/*
	 * ilu_kstat_io may be NULL (illumos #3863: kstat creation can
	 * fail at LU registration); skip LU queue accounting rather than
	 * dereferencing a NULL kstat.
	 */
	if (ilu->ilu_kstat_io != NULL) {
		mutex_enter(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_enter);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
	}

	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_enter);
}
7555 7166
/*
 * Kstat accounting for a task moving from the wait queue to the run
 * queue when the LU starts working on it.
 */
void
stmf_itl_lu_new_task(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;

	/* No ITL nexus data, or internal dlun0 task: nothing to account. */
	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
	/* Guard against a NULL ilu_kstat_io (kstat creation can fail). */
	if (ilu->ilu_kstat_io != NULL) {
		mutex_enter(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lu_q(itask->itask_task, kstat_waitq_to_runq);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
	}

	stmf_update_kstat_lport_q(itask->itask_task, kstat_waitq_to_runq);
}
7573 7185
/*
 * Kstat accounting for task completion: timestamp the task and take it
 * off whichever queue (run or wait) it currently occupies on both the
 * LU and lport kstats.
 */
void
stmf_itl_task_done(stmf_i_scsi_task_t *itask)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	scsi_task_t *task = itask->itask_task;
	stmf_i_lu_t *ilu;

	/* Always record the completion time, even for dlun0 tasks. */
	itask->itask_done_timestamp = gethrtime();

	if (itl == NULL || task->task_lu == dlun0)
		return;
	ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;

	/* No LU kstat (creation may have failed): nothing to update. */
	if (ilu->ilu_kstat_io == NULL)
		return;

	mutex_enter(ilu->ilu_kstat_io->ks_lock);

	/*
	 * A task that made it onto the run queue exits via runq; one
	 * that was still waiting exits via waitq. The lport update is
	 * done outside the LU kstat lock.
	 */
	if (itask->itask_flags & ITASK_KSTAT_IN_RUNQ) {
		stmf_update_kstat_lu_q(task, kstat_runq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_runq_exit);
	} else {
		stmf_update_kstat_lu_q(task, kstat_waitq_exit);
		mutex_exit(ilu->ilu_kstat_io->ks_lock);
		stmf_update_kstat_lport_q(task, kstat_waitq_exit);
	}
}
7630 7214
7631 -void
7632 -stmf_lu_xfer_start(scsi_task_t *task)
7633 -{
7634 - stmf_i_scsi_task_t *itask = task->task_stmf_private;
7635 - stmf_itl_data_t *itl = itask->itask_itl_datap;
7636 - stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7637 - kstat_io_t *kip;
7638 -
7639 - if (itl == NULL || task->task_lu == dlun0)
7640 - return;
7641 -
7642 - kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7643 - mutex_enter(ilu->ilu_kstat_io->ks_lock);
7644 - kstat_runq_enter(kip);
7645 - mutex_exit(ilu->ilu_kstat_io->ks_lock);
7646 -}
7647 -
7648 -void
7649 -stmf_lu_xfer_done(scsi_task_t *task, boolean_t read, uint64_t xfer_bytes,
7650 - hrtime_t elapsed_time)
7651 -{
7652 - stmf_i_scsi_task_t *itask = task->task_stmf_private;
7653 - stmf_itl_data_t *itl = itask->itask_itl_datap;
7654 - stmf_i_lu_t *ilu = (stmf_i_lu_t *)task->task_lu->lu_stmf_private;
7655 - kstat_io_t *kip;
7656 -
7657 - if (itl == NULL || task->task_lu == dlun0)
7658 - return;
7659 -
7660 - if (read) {
7661 - atomic_add_64((uint64_t *)&itask->itask_lu_read_time,
7662 - elapsed_time);
7663 - } else {
7664 - atomic_add_64((uint64_t *)&itask->itask_lu_write_time,
7665 - elapsed_time);
7666 - }
7667 -
7668 - kip = KSTAT_IO_PTR(itl->itl_kstat_lu_xfer);
7669 - mutex_enter(ilu->ilu_kstat_io->ks_lock);
7670 - kstat_runq_exit(kip);
7671 - if (read) {
7672 - kip->reads++;
7673 - kip->nread += xfer_bytes;
7674 - } else {
7675 - kip->writes++;
7676 - kip->nwritten += xfer_bytes;
7677 - }
7678 - mutex_exit(ilu->ilu_kstat_io->ks_lock);
7679 -}
7680 -
/*
 * Mark the start of a local-port data transfer: fire the DTrace probe
 * and stamp the buffer so stmf_lport_xfer_done() can compute elapsed
 * time.
 */
static void
stmf_lport_xfer_start(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;

	/* No ITL nexus data: transfer timing is not tracked. */
	if (itl == NULL)
		return;

	DTRACE_PROBE2(scsi__xfer__start, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf);

	dbuf->db_xfer_start_timestamp = gethrtime();
}
7694 7228
/*
 * Account for a completed local-port data transfer: accumulate the
 * per-task transfer time and byte counts, fire the DTrace probe, and
 * clear the in-flight timestamp.
 */
static void
stmf_lport_xfer_done(stmf_i_scsi_task_t *itask, stmf_data_buf_t *dbuf)
{
	stmf_itl_data_t *itl = itask->itask_itl_datap;
	hrtime_t elapsed_time;
	uint64_t xfer_size;

	if (itl == NULL)
		return;

	/* Failed transfers contribute no bytes to the counters. */
	xfer_size = (dbuf->db_xfer_status == STMF_SUCCESS) ?
	    dbuf->db_data_size : 0;

	elapsed_time = gethrtime() - dbuf->db_xfer_start_timestamp;
	if (dbuf->db_flags & DB_DIRECTION_TO_RPORT) {
		/* Data flowing to the remote port counts as a read. */
		atomic_add_64((uint64_t *)&itask->itask_lport_read_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_read_xfer,
		    xfer_size);
	} else {
		atomic_add_64((uint64_t *)&itask->itask_lport_write_time,
		    elapsed_time);
		atomic_add_64((uint64_t *)&itask->itask_write_xfer,
		    xfer_size);
	}

	DTRACE_PROBE3(scsi__xfer__end, scsi_task_t *, itask->itask_task,
	    stmf_data_buf_t *, dbuf, hrtime_t, elapsed_time);

	/* No transfer is in flight on this buffer any more. */
	dbuf->db_xfer_start_timestamp = 0;
}
7742 7260
/*
 * Start the STMF service thread: a single-threaded taskq running
 * stmf_svc(). Idempotent — does nothing if already started.
 */
void
stmf_svc_init()
{
	if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
		return;
	/* Empty request queue: the tail pointer points at the head. */
	stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;
	stmf_state.stmf_svc_taskq = ddi_taskq_create(0, "STMF_SVC_TASKQ", 1,
	    TASKQ_DEFAULTPRI, 0);
	(void) ddi_taskq_dispatch(stmf_state.stmf_svc_taskq,
	    stmf_svc, 0, DDI_SLEEP);
}
7754 7272
7755 7273 stmf_status_t
7756 7274 stmf_svc_fini()
7757 7275 {
7758 7276 uint32_t i;
7759 7277
7760 7278 mutex_enter(&stmf_state.stmf_lock);
7761 7279 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED) {
7762 7280 stmf_state.stmf_svc_flags |= STMF_SVC_TERMINATE;
7763 7281 cv_signal(&stmf_state.stmf_cv);
7764 7282 }
7765 7283 mutex_exit(&stmf_state.stmf_lock);
7766 7284
7767 7285 /* Wait for 5 seconds */
7768 7286 for (i = 0; i < 500; i++) {
7769 7287 if (stmf_state.stmf_svc_flags & STMF_SVC_STARTED)
7770 7288 delay(drv_usectohz(10000));
7771 7289 else
7772 7290 break;
7773 7291 }
7774 7292 if (i == 500)
7775 7293 return (STMF_BUSY);
7776 7294
7777 7295 ddi_taskq_destroy(stmf_state.stmf_svc_taskq);
7778 7296
7779 7297 return (STMF_SUCCESS);
7780 7298 }
7781 7299
/*
 * Bookkeeping for stmf_svc()'s periodic work: lbolt start/next times
 * for the free-task drain scan and the ilu timing scan, plus the next
 * time worker management should run.
 */
struct stmf_svc_clocks {
	clock_t drain_start, drain_next;
	clock_t timing_start, timing_next;
	clock_t worker_delay;
};
7787 7305
/* ARGSUSED */
/*
 * The STMF service thread. Pops requests off the active list and
 * executes the corresponding online/offline control operation; when
 * the list is empty, runs periodic housekeeping via stmf_svc_timeout().
 * Runs until STMF_SVC_TERMINATE is set by stmf_svc_fini().
 */
void
stmf_svc(void *arg)
{
	stmf_svc_req_t *req;
	stmf_lu_t *lu;
	stmf_i_lu_t *ilu;
	stmf_local_port_t *lport;
	struct stmf_svc_clocks clks = { 0 };

	mutex_enter(&stmf_state.stmf_lock);
	stmf_state.stmf_svc_flags |= STMF_SVC_STARTED | STMF_SVC_ACTIVE;

	while (!(stmf_state.stmf_svc_flags & STMF_SVC_TERMINATE)) {
		if (stmf_state.stmf_svc_active == NULL) {
			/* Nothing queued: do housekeeping and sleep. */
			stmf_svc_timeout(&clks);
			continue;
		}

		/*
		 * Pop the front request from the active list. After this,
		 * the request will no longer be referenced by global state,
		 * so it should be safe to access it without holding the
		 * stmf state lock.
		 */
		req = stmf_state.stmf_svc_active;
		stmf_state.stmf_svc_active = req->svc_next;

		if (stmf_state.stmf_svc_active == NULL)
			stmf_state.stmf_svc_tailp = &stmf_state.stmf_svc_active;

		switch (req->svc_cmd) {
		case STMF_CMD_LPORT_ONLINE:
			/* Fallthrough */
		case STMF_CMD_LPORT_OFFLINE:
			mutex_exit(&stmf_state.stmf_lock);
			lport = (stmf_local_port_t *)req->svc_obj;
			lport->lport_ctl(lport, req->svc_cmd, &req->svc_info);
			break;
		case STMF_CMD_LU_ONLINE:
			mutex_exit(&stmf_state.stmf_lock);
			lu = (stmf_lu_t *)req->svc_obj;
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;
		case STMF_CMD_LU_OFFLINE:
			/* Remove all mappings of this LU */
			stmf_session_lu_unmapall((stmf_lu_t *)req->svc_obj);
			/* Kill all the pending I/Os for this LU */
			mutex_exit(&stmf_state.stmf_lock);
			stmf_task_lu_killall((stmf_lu_t *)req->svc_obj, NULL,
			    STMF_ABORTED);
			lu = (stmf_lu_t *)req->svc_obj;
			ilu = (stmf_i_lu_t *)lu->lu_stmf_private;
			/*
			 * Block until every task has drained (with deadman
			 * enforcement) so the LU can't get stuck offlining
			 * (illumos #3621).
			 */
			stmf_wait_ilu_tasks_finish(ilu);
			lu->lu_ctl(lu, req->svc_cmd, &req->svc_info);
			break;
		default:
			cmn_err(CE_PANIC, "stmf_svc: unknown cmd %d",
			    req->svc_cmd);
		}

		/* The request was popped above; free it here, not in queue. */
		kmem_free(req, req->svc_req_alloc_size);
		mutex_enter(&stmf_state.stmf_lock);
	}

	stmf_state.stmf_svc_flags &= ~(STMF_SVC_STARTED | STMF_SVC_ACTIVE);
	mutex_exit(&stmf_state.stmf_lock);
}
7856 7374
/*
 * Periodic housekeeping for the service thread, run while the request
 * queue is empty and stmf_state.stmf_lock is held: ilu timing checks,
 * free-task draining, worker management, and initial-LUN-mapped event
 * delivery. Sleeps briefly at the end unless new requests arrived.
 */
static void
stmf_svc_timeout(struct stmf_svc_clocks *clks)
{
	clock_t td;
	stmf_i_local_port_t *ilport, *next_ilport;
	stmf_i_scsi_session_t *iss;

	ASSERT(mutex_owned(&stmf_state.stmf_lock));

	td = drv_usectohz(20000);

	/* Do timeouts */
	if (stmf_state.stmf_nlus &&
	    ((!clks->timing_next) || (ddi_get_lbolt() >= clks->timing_next))) {
		if (!stmf_state.stmf_svc_ilu_timing) {
			/* we are starting a new round */
			stmf_state.stmf_svc_ilu_timing =
			    stmf_state.stmf_ilulist;
			clks->timing_start = ddi_get_lbolt();
		}

		stmf_check_ilu_timing();
		if (!stmf_state.stmf_svc_ilu_timing) {
			/* we finished a complete round */
			clks->timing_next =
			    clks->timing_start + drv_usectohz(5*1000*1000);
		} else {
			/* we still have some ilu items to check */
			clks->timing_next =
			    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
		}

		/* New work queued: return so the main loop services it. */
		if (stmf_state.stmf_svc_active)
			return;
	}

	/* Check if there are free tasks to clear */
	if (stmf_state.stmf_nlus &&
	    ((!clks->drain_next) || (ddi_get_lbolt() >= clks->drain_next))) {
		if (!stmf_state.stmf_svc_ilu_draining) {
			/* we are starting a new round */
			stmf_state.stmf_svc_ilu_draining =
			    stmf_state.stmf_ilulist;
			clks->drain_start = ddi_get_lbolt();
		}

		stmf_check_freetask();
		if (!stmf_state.stmf_svc_ilu_draining) {
			/* we finished a complete round */
			clks->drain_next =
			    clks->drain_start + drv_usectohz(10*1000*1000);
		} else {
			/* we still have some ilu items to check */
			clks->drain_next =
			    ddi_get_lbolt() + drv_usectohz(1*1000*1000);
		}

		/* New work queued: return so the main loop services it. */
		if (stmf_state.stmf_svc_active)
			return;
	}

	/* Check if we need to run worker_mgmt */
	if (ddi_get_lbolt() > clks->worker_delay) {
		stmf_worker_mgmt();
		clks->worker_delay = ddi_get_lbolt() +
		    stmf_worker_mgmt_delay;
	}

	/* Check if any active session got its 1st LUN */
	if (stmf_state.stmf_process_initial_luns) {
		int stmf_level = 0;
		int port_level;

		for (ilport = stmf_state.stmf_ilportlist; ilport;
		    ilport = next_ilport) {
			int ilport_lock_held;
			next_ilport = ilport->ilport_next;

			if ((ilport->ilport_flags &
			    ILPORT_SS_GOT_INITIAL_LUNS) == 0)
				continue;

			port_level = 0;
			rw_enter(&ilport->ilport_lock, RW_READER);
			ilport_lock_held = 1;

			for (iss = ilport->ilport_ss_list; iss;
			    iss = iss->iss_next) {
				if ((iss->iss_flags &
				    ISS_GOT_INITIAL_LUNS) == 0)
					continue;

				port_level++;
				stmf_level++;
				atomic_and_32(&iss->iss_flags,
				    ~ISS_GOT_INITIAL_LUNS);
				atomic_or_32(&iss->iss_flags,
				    ISS_EVENT_ACTIVE);
				/*
				 * Drop both locks around event delivery;
				 * the handler may block.
				 */
				rw_exit(&ilport->ilport_lock);
				ilport_lock_held = 0;
				mutex_exit(&stmf_state.stmf_lock);
				stmf_generate_lport_event(ilport,
				    LPORT_EVENT_INITIAL_LUN_MAPPED,
				    iss->iss_ss, 0);
				atomic_and_32(&iss->iss_flags,
				    ~ISS_EVENT_ACTIVE);
				mutex_enter(&stmf_state.stmf_lock);
				/*
				 * scan all the ilports again as the
				 * ilport list might have changed.
				 */
				next_ilport = stmf_state.stmf_ilportlist;
				break;
			}

			if (port_level == 0)
				atomic_and_32(&ilport->ilport_flags,
				    ~ILPORT_SS_GOT_INITIAL_LUNS);
			/* drop the lock if we are holding it. */
			if (ilport_lock_held == 1)
				rw_exit(&ilport->ilport_lock);

			/* Max 4 session at a time */
			if (stmf_level >= 4)
				break;
		}

		if (stmf_level == 0)
			stmf_state.stmf_process_initial_luns = 0;
	}

	/* Sleep (up to 20ms) until signalled or timed out. */
	stmf_state.stmf_svc_flags &= ~STMF_SVC_ACTIVE;
	(void) cv_reltimedwait(&stmf_state.stmf_cv,
	    &stmf_state.stmf_lock, td, TR_CLOCK_TICK);
	stmf_state.stmf_svc_flags |= STMF_SVC_ACTIVE;
}
7993 7511
/*
 * Waits for ongoing I/O tasks to finish on an LU in preparation for
 * the LU's offlining. The LU should already be in an Offlining state
 * (otherwise I/O to the LU might never end). There is an additional
 * enforcement of this via a deadman timer check.
 */
static void
stmf_wait_ilu_tasks_finish(stmf_i_lu_t *ilu)
{
	clock_t start, now, deadline;

	start = now = ddi_get_lbolt();
	deadline = start + drv_usectohz(stmf_io_deadman * 1000000llu);
	mutex_enter(&ilu->ilu_task_lock);
	/* All tasks are done once every allocated task is back on the
	 * free list. */
	while (ilu->ilu_ntasks != ilu->ilu_ntasks_free) {
		(void) cv_timedwait(&ilu->ilu_offline_pending_cv,
		    &ilu->ilu_task_lock, deadline);
		now = ddi_get_lbolt();
		if (now > deadline) {
			if (stmf_io_deadman_enabled) {
				/* Deliberate panic: I/O is wedged and the
				 * deadman is armed. */
				cmn_err(CE_PANIC, "stmf_svc: I/O deadman hit "
				    "on STMF_CMD_LU_OFFLINE after %d seconds",
				    stmf_io_deadman);
			} else {
				/* keep on spinning */
				deadline = now + drv_usectohz(stmf_io_deadman *
				    1000000llu);
			}
		}
	}
	mutex_exit(&ilu->ilu_task_lock);
	DTRACE_PROBE1(deadman__timeout__wait, clock_t, now - start);
}
7545 +
7994 7546 void
7995 7547 stmf_svc_queue(int cmd, void *obj, stmf_state_change_info_t *info)
7996 7548 {
7997 7549 stmf_svc_req_t *req;
7998 7550 int s;
7999 7551
8000 7552 ASSERT(!mutex_owned(&stmf_state.stmf_lock));
8001 7553 s = sizeof (stmf_svc_req_t);
8002 7554 if (info->st_additional_info) {
8003 7555 s += strlen(info->st_additional_info) + 1;
8004 7556 }
8005 7557 req = kmem_zalloc(s, KM_SLEEP);
8006 7558
8007 7559 req->svc_cmd = cmd;
8008 7560 req->svc_obj = obj;
8009 7561 req->svc_info.st_rflags = info->st_rflags;
8010 7562 if (info->st_additional_info) {
8011 7563 req->svc_info.st_additional_info = (char *)(GET_BYTE_OFFSET(req,
8012 7564 sizeof (stmf_svc_req_t)));
8013 7565 (void) strcpy(req->svc_info.st_additional_info,
8014 7566 info->st_additional_info);
8015 7567 }
8016 7568 req->svc_req_alloc_size = s;
8017 7569 req->svc_next = NULL;
8018 7570
8019 7571 mutex_enter(&stmf_state.stmf_lock);
8020 7572 *stmf_state.stmf_svc_tailp = req;
8021 7573 stmf_state.stmf_svc_tailp = &req->svc_next;
8022 7574 if ((stmf_state.stmf_svc_flags & STMF_SVC_ACTIVE) == 0) {
8023 7575 cv_signal(&stmf_state.stmf_cv);
8024 7576 }
8025 7577 mutex_exit(&stmf_state.stmf_lock);
8026 7578 }
8027 7579
8028 7580 static void
8029 7581 stmf_svc_kill_obj_requests(void *obj)
8030 7582 {
8031 7583 stmf_svc_req_t *prev_req = NULL;
8032 7584 stmf_svc_req_t *next_req;
8033 7585 stmf_svc_req_t *req;
8034 7586
8035 7587 ASSERT(mutex_owned(&stmf_state.stmf_lock));
8036 7588
8037 7589 for (req = stmf_state.stmf_svc_active; req != NULL; req = next_req) {
8038 7590 next_req = req->svc_next;
8039 7591
8040 7592 if (req->svc_obj == obj) {
8041 7593 if (prev_req != NULL)
8042 7594 prev_req->svc_next = next_req;
8043 7595 else
8044 7596 stmf_state.stmf_svc_active = next_req;
8045 7597
8046 7598 if (next_req == NULL)
8047 7599 stmf_state.stmf_svc_tailp = (prev_req != NULL) ?
8048 7600 &prev_req->svc_next :
8049 7601 &stmf_state.stmf_svc_active;
8050 7602
8051 7603 kmem_free(req, req->svc_req_alloc_size);
8052 7604 } else {
8053 7605 prev_req = req;
8054 7606 }
8055 7607 }
8056 7608 }
8057 7609
/*
 * Append a printf-style entry to the global STMF trace ring buffer,
 * prefixed with the caller's ident and the current lbolt. No-op when
 * tracing is disabled.
 */
void
stmf_trace(caddr_t ident, const char *fmt, ...)
{
	va_list args;
	char tbuf[160];
	int len;

	if (!stmf_trace_on)
		return;
	len = snprintf(tbuf, 158, "%s:%07lu: ", ident ? ident : "",
	    ddi_get_lbolt());
	va_start(args, fmt);
	len += vsnprintf(tbuf + len, 158 - len, fmt, args);
	va_end(args);

	/* (v)snprintf return the would-be length; clamp on truncation. */
	if (len > 158) {
		len = 158;
	}
	tbuf[len++] = '\n';
	tbuf[len] = 0;

	/* Copy into the ring buffer, wrapping well before the end. */
	mutex_enter(&trace_buf_lock);
	bcopy(tbuf, &stmf_trace_buf[trace_buf_curndx], len+1);
	trace_buf_curndx += len;
	if (trace_buf_curndx > (trace_buf_size - 320))
		trace_buf_curndx = 0;
	mutex_exit(&trace_buf_lock);
}
8086 7638
/*
 * Reset the trace ring buffer: rewind the cursor and truncate the
 * buffer contents. No-op when tracing is disabled.
 */
void
stmf_trace_clear()
{
	if (!stmf_trace_on)
		return;
	mutex_enter(&trace_buf_lock);
	trace_buf_curndx = 0;
	if (trace_buf_size > 0)
		stmf_trace_buf[0] = 0;
	mutex_exit(&trace_buf_lock);
}
8098 7650
/*
 * Take the LU (offline_lu != 0) or the local port that a task belongs
 * to offline in response to an abort, queueing the offline request via
 * stmf_ctl().
 */
static void
stmf_abort_task_offline(scsi_task_t *task, int offline_lu, char *info)
{
	stmf_state_change_info_t change_info;
	void *ctl_private;
	uint32_t ctl_cmd;
	int msg = 0;

	stmf_trace("FROM STMF", "abort_task_offline called for %s: %s",
	    offline_lu ? "LU" : "LPORT", info ? info : "no additional info");
	change_info.st_additional_info = info;
	if (offline_lu) {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LU_ABORT;
		ctl_private = task->task_lu;
		/* Only trace the transition if the LU was actually online. */
		if (((stmf_i_lu_t *)
		    task->task_lu->lu_stmf_private)->ilu_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LU_OFFLINE;
	} else {
		change_info.st_rflags = STMF_RFLAG_RESET |
		    STMF_RFLAG_LPORT_ABORT;
		ctl_private = task->task_lport;
		if (((stmf_i_local_port_t *)
		    task->task_lport->lport_stmf_private)->ilport_state ==
		    STMF_STATE_ONLINE) {
			msg = 1;
		}
		ctl_cmd = STMF_CMD_LPORT_OFFLINE;
	}

	if (msg) {
		stmf_trace(0, "Calling stmf_ctl to offline %s : %s",
		    offline_lu ? "LU" : "LPORT", info ? info :
		    "<no additional info>");
	}
	(void) stmf_ctl(ctl_cmd, ctl_private, &change_info);
}
8139 7691
/*
 * Convert a single hex ASCII digit to its numeric value (0-15).
 * Returns -1 for a character that is not a hex digit.
 */
static char
stmf_ctoi(char c)
{
	if ((c >= '0') && (c <= '9'))
		return (c - '0');
	if ((c >= 'A') && (c <= 'F'))
		return (c - 'A' + 10);
	if ((c >= 'a') && (c <= 'f'))
		return (c - 'a' + 10);
	return (-1);
}
8153 7705
8154 7706 /* Convert from Hex value in ASCII format to the equivalent bytes */
8155 7707 static boolean_t
8156 7708 stmf_base16_str_to_binary(char *c, int dplen, uint8_t *dp)
8157 7709 {
8158 7710 int ii;
8159 7711
8160 7712 for (ii = 0; ii < dplen; ii++) {
8161 7713 char nibble1, nibble2;
8162 7714 char enc_char = *c++;
8163 7715 nibble1 = stmf_ctoi(enc_char);
8164 7716
8165 7717 enc_char = *c++;
8166 7718 nibble2 = stmf_ctoi(enc_char);
8167 7719 if (nibble1 == -1 || nibble2 == -1)
8168 7720 return (B_FALSE);
8169 7721
8170 7722 dp[ii] = (nibble1 << 4) | nibble2;
8171 7723 }
8172 7724 return (B_TRUE);
8173 7725 }
8174 7726
/*
 * Validate the SCSI TransportID at "tptid" against the SPC-3 rev 23
 * layout for its protocol_id, with "total_sz" giving the number of
 * bytes actually available in the buffer.  Returns B_TRUE when the
 * TransportID is well formed; on success the TransportID's actual
 * length is stored through the optional "tptid_sz" out-parameter
 * (left 0 on failure).
 */
boolean_t
stmf_scsilib_tptid_validate(scsi_transport_id_t *tptid, uint32_t total_sz,
    uint16_t *tptid_sz)
{
	uint16_t tpd_len = SCSI_TPTID_SIZE;

	if (tptid_sz)
		*tptid_sz = 0;
	/* Must at least hold the fixed TransportID header. */
	if (total_sz < sizeof (scsi_transport_id_t))
		return (B_FALSE);

	switch (tptid->protocol_id) {

	case PROTOCOL_FIBRE_CHANNEL:
		/* FC Transport ID validation checks. SPC3 rev23, Table 284 */
		if (total_sz < tpd_len || tptid->format_code != 0)
			return (B_FALSE);
		break;

	case PROTOCOL_iSCSI:
	{
		iscsi_transport_id_t *iscsiid;
		uint16_t adn_len, name_len;

		/* Check for valid format code, SPC3 rev 23 Table 288 */
		if ((total_sz < tpd_len) ||
		    (tptid->format_code != 0 && tptid->format_code != 1))
			return (B_FALSE);

		iscsiid = (iscsi_transport_id_t *)tptid;
		adn_len = READ_SCSI16(iscsiid->add_len, uint16_t);
		/*
		 * Full length is the structure plus the additional-data
		 * bytes; the "- 1" accounts for the placeholder byte
		 * already inside iscsi_transport_id_t (presumably a
		 * one-byte name field — TODO confirm the struct layout).
		 */
		tpd_len = sizeof (iscsi_transport_id_t) + adn_len - 1;

		/*
		 * iSCSI Transport ID validation checks.
		 * As per SPC3 rev 23 Section 7.5.4.6 and Table 289 & Table 290
		 */
		if (adn_len < 20 || (adn_len % 4 != 0))
			return (B_FALSE);

		/* Name must be NUL-terminated within the additional data. */
		name_len = strnlen(iscsiid->iscsi_name, adn_len);
		if (name_len == 0 || name_len >= adn_len)
			return (B_FALSE);

		/* If the format_code is 1 check for ISID separator */
		if ((tptid->format_code == 1) && (strstr(iscsiid->iscsi_name,
		    SCSI_TPTID_ISCSI_ISID_SEPERATOR) == NULL))
			return (B_FALSE);

	}
	break;

	case PROTOCOL_SRP:
		/* SRP Transport ID validation checks. SPC3 rev23, Table 287 */
		if (total_sz < tpd_len || tptid->format_code != 0)
			return (B_FALSE);
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default:
	{
		stmf_dflt_scsi_tptid_t *dflttpd;

		/* First verify the fixed part fits, then the identifier. */
		tpd_len = sizeof (stmf_dflt_scsi_tptid_t);
		if (total_sz < tpd_len)
			return (B_FALSE);
		dflttpd = (stmf_dflt_scsi_tptid_t *)tptid;
		tpd_len = tpd_len + SCSI_READ16(&dflttpd->ident_len) - 1;
		if (total_sz < tpd_len)
			return (B_FALSE);
	}
	break;
	}
	if (tptid_sz)
		*tptid_sz = tpd_len;
	return (B_TRUE);
}
8257 7809
8258 7810 boolean_t
8259 7811 stmf_scsilib_tptid_compare(scsi_transport_id_t *tpd1,
8260 7812 scsi_transport_id_t *tpd2)
8261 7813 {
8262 7814 if ((tpd1->protocol_id != tpd2->protocol_id) ||
8263 7815 (tpd1->format_code != tpd2->format_code))
8264 7816 return (B_FALSE);
8265 7817
8266 7818 switch (tpd1->protocol_id) {
8267 7819
8268 7820 case PROTOCOL_iSCSI:
8269 7821 {
8270 7822 iscsi_transport_id_t *iscsitpd1, *iscsitpd2;
8271 7823 uint16_t len;
8272 7824
8273 7825 iscsitpd1 = (iscsi_transport_id_t *)tpd1;
8274 7826 iscsitpd2 = (iscsi_transport_id_t *)tpd2;
8275 7827 len = SCSI_READ16(&iscsitpd1->add_len);
8276 7828 if ((memcmp(iscsitpd1->add_len, iscsitpd2->add_len, 2) != 0) ||
8277 7829 (memcmp(iscsitpd1->iscsi_name, iscsitpd2->iscsi_name, len)
8278 7830 != 0))
8279 7831 return (B_FALSE);
8280 7832 }
8281 7833 break;
8282 7834
8283 7835 case PROTOCOL_SRP:
8284 7836 {
8285 7837 scsi_srp_transport_id_t *srptpd1, *srptpd2;
8286 7838
8287 7839 srptpd1 = (scsi_srp_transport_id_t *)tpd1;
8288 7840 srptpd2 = (scsi_srp_transport_id_t *)tpd2;
8289 7841 if (memcmp(srptpd1->srp_name, srptpd2->srp_name,
8290 7842 sizeof (srptpd1->srp_name)) != 0)
8291 7843 return (B_FALSE);
8292 7844 }
8293 7845 break;
8294 7846
8295 7847 case PROTOCOL_FIBRE_CHANNEL:
8296 7848 {
8297 7849 scsi_fc_transport_id_t *fctpd1, *fctpd2;
8298 7850
8299 7851 fctpd1 = (scsi_fc_transport_id_t *)tpd1;
8300 7852 fctpd2 = (scsi_fc_transport_id_t *)tpd2;
8301 7853 if (memcmp(fctpd1->port_name, fctpd2->port_name,
8302 7854 sizeof (fctpd1->port_name)) != 0)
8303 7855 return (B_FALSE);
8304 7856 }
8305 7857 break;
8306 7858
8307 7859 case PROTOCOL_PARALLEL_SCSI:
8308 7860 case PROTOCOL_SSA:
8309 7861 case PROTOCOL_IEEE_1394:
8310 7862 case PROTOCOL_SAS:
8311 7863 case PROTOCOL_ADT:
8312 7864 case PROTOCOL_ATAPI:
8313 7865 default:
8314 7866 {
8315 7867 stmf_dflt_scsi_tptid_t *dflt1, *dflt2;
8316 7868 uint16_t len;
8317 7869
8318 7870 dflt1 = (stmf_dflt_scsi_tptid_t *)tpd1;
8319 7871 dflt2 = (stmf_dflt_scsi_tptid_t *)tpd2;
8320 7872 len = SCSI_READ16(&dflt1->ident_len);
8321 7873 if ((memcmp(dflt1->ident_len, dflt2->ident_len, 2) != 0) ||
8322 7874 (memcmp(dflt1->ident, dflt2->ident, len) != 0))
8323 7875 return (B_FALSE);
8324 7876 }
8325 7877 break;
8326 7878 }
8327 7879 return (B_TRUE);
8328 7880 }
8329 7881
/*
 * Convert a scsi_devid_desc_t to the corresponding TransportID format.
 * Returns :- pointer to stmf_remote_port_t, or NULL if the devid's
 *	      identifier string could not be parsed.
 * Note :- Allocates contiguous memory for the stmf_remote_port_t and the
 *	   TransportID.  This memory must be freed (stmf_remote_port_free)
 *	   when the remote_port is no longer used.
 */
stmf_remote_port_t *
stmf_scsilib_devid_to_remote_port(scsi_devid_desc_t *devid)
{
	struct scsi_fc_transport_id *fc_tpd;
	struct iscsi_transport_id *iscsi_tpd;
	struct scsi_srp_transport_id *srp_tpd;
	struct stmf_dflt_scsi_tptid *dflt_tpd;
	uint16_t ident_len, sz = 0;
	stmf_remote_port_t *rpt = NULL;

	ident_len = devid->ident_length;
	ASSERT(ident_len);
	switch (devid->protocol_id) {
	case PROTOCOL_FIBRE_CHANNEL:
		sz = sizeof (scsi_fc_transport_id_t);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		fc_tpd = (scsi_fc_transport_id_t *)rpt->rport_tptid;
		/*
		 * convert from "wwn.xxxxxxxxxxxxxxxx" to 8-byte binary
		 * skip first 4 byte for "wwn."
		 */
		ASSERT(strncmp("wwn.", (char *)devid->ident, 4) == 0);
		if ((ident_len < SCSI_TPTID_FC_PORT_NAME_SIZE * 2 + 4) ||
		    !stmf_base16_str_to_binary((char *)devid->ident + 4,
		    SCSI_TPTID_FC_PORT_NAME_SIZE, fc_tpd->port_name))
			goto devid_to_remote_port_fail;
		break;

	case PROTOCOL_iSCSI:
		/*
		 * iSCSI name is copied verbatim; round the allocation up
		 * so the trailing TransportID stays 8-byte aligned.
		 */
		sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (iscsi_transport_id_t) +
		    ident_len - 1);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		iscsi_tpd = (iscsi_transport_id_t *)rpt->rport_tptid;
		SCSI_WRITE16(iscsi_tpd->add_len, ident_len);
		(void) memcpy(iscsi_tpd->iscsi_name, devid->ident, ident_len);
		break;

	case PROTOCOL_SRP:
		sz = sizeof (scsi_srp_transport_id_t);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		srp_tpd = (scsi_srp_transport_id_t *)rpt->rport_tptid;
		/*
		 * convert from "eui.xxxxxxxxxxxxxxx" to 8-byte binary
		 * skip first 4 byte for "eui."
		 * Assume 8-byte initiator-extension part of srp_name is NOT
		 * stored in devid and hence will be set as zero
		 *
		 * NOTE(review): the length guard below only guarantees
		 * (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 hex characters are
		 * present, yet the conversion call consumes
		 * SCSI_TPTID_SRP_PORT_NAME_LEN * 2 characters.  For a
		 * minimum-length devid this reads past the identifier (the
		 * NUL terminator makes the conversion fail) — presumably the
		 * conversion length should be SCSI_TPTID_SRP_PORT_NAME_LEN -
		 * 8; verify against SRP callers before changing.
		 */
		ASSERT(strncmp("eui.", (char *)devid->ident, 4) == 0);
		if ((ident_len < (SCSI_TPTID_SRP_PORT_NAME_LEN - 8) * 2 + 4) ||
		    !stmf_base16_str_to_binary((char *)devid->ident+4,
		    SCSI_TPTID_SRP_PORT_NAME_LEN, srp_tpd->srp_name))
			goto devid_to_remote_port_fail;
		break;

	case PROTOCOL_PARALLEL_SCSI:
	case PROTOCOL_SSA:
	case PROTOCOL_IEEE_1394:
	case PROTOCOL_SAS:
	case PROTOCOL_ADT:
	case PROTOCOL_ATAPI:
	default :
		/* Unknown protocols keep the raw identifier bytes. */
		ident_len = devid->ident_length;
		sz = ALIGNED_TO_8BYTE_BOUNDARY(sizeof (stmf_dflt_scsi_tptid_t) +
		    ident_len - 1);
		rpt = stmf_remote_port_alloc(sz);
		rpt->rport_tptid->format_code = 0;
		rpt->rport_tptid->protocol_id = devid->protocol_id;
		dflt_tpd = (stmf_dflt_scsi_tptid_t *)rpt->rport_tptid;
		SCSI_WRITE16(dflt_tpd->ident_len, ident_len);
		(void) memcpy(dflt_tpd->ident, devid->ident, ident_len);
		break;
	}
	return (rpt);

devid_to_remote_port_fail:
	stmf_remote_port_free(rpt);
	return (NULL);

}
8422 7974
8423 7975 stmf_remote_port_t *
8424 7976 stmf_remote_port_alloc(uint16_t tptid_sz) {
8425 7977 stmf_remote_port_t *rpt;
8426 7978 rpt = (stmf_remote_port_t *)kmem_zalloc(
8427 7979 sizeof (stmf_remote_port_t) + tptid_sz, KM_SLEEP);
8428 7980 rpt->rport_tptid_sz = tptid_sz;
8429 7981 rpt->rport_tptid = (scsi_transport_id_t *)(rpt + 1);
8430 7982 return (rpt);
8431 7983 }
8432 7984
/*
 * Release a remote port structure and its trailing TransportID.
 *
 * Note: stmf_scsilib_devid_to_remote_port() allocates remote port
 * structures for all transports the same way — one contiguous
 * kmem_zalloc of the struct plus rport_tptid_sz TransportID bytes —
 * so it is safe to deallocate in a protocol-independent manner here.
 * If the allocation scheme ever changes, this function must change
 * with it.
 */
void
stmf_remote_port_free(stmf_remote_port_t *rpt)
{
	kmem_free(rpt, sizeof (stmf_remote_port_t) + rpt->rport_tptid_sz);
}
|
↓ open down ↓ |
441 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX