/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */
/*
 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
 *
 * Copyright 2016 Nexenta Systems, Inc. All rights reserved.
 * Copyright (c) 2013 by Delphix. All rights reserved.
 */
#ifndef	_STMF_IMPL_H
#define	_STMF_IMPL_H

#include <sys/stmf_defines.h>
#include <sys/stmf_ioctl.h>

#ifdef	__cplusplus
extern "C" {
#endif

typedef uint32_t stmf_event_handle_t;
#define	STMF_MAX_NUM_EVENTS		(sizeof (stmf_event_handle_t) * 8)
#define	STMF_EVENT_ADD(h, e)		(atomic_or_32(&(h), \
					    ((uint32_t)1) << (e)))
#define	STMF_EVENT_REMOVE(h, e)		(atomic_and_32(&(h), \
					    ~(((uint32_t)1) << (e))))
#define	STMF_EVENT_ENABLED(h, e)	(((h) & (((uint32_t)1) << (e))) != 0)
#define	STMF_EVENT_CLEAR_ALL(h)		((h) = 0)
#define	STMF_EVENT_ALLOC_HANDLE(h)	((h) = 0)
#define	STMF_EVENT_FREE_HANDLE(h)	((h) = 0)
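
/*
 * A minimal usage sketch for the event handle macros above; illustrative
 * only, not part of the framework.  "some_event_id" stands in for one of
 * the LU/local-port event ids defined by the framework headers.  The
 * handle starts out empty, one event is enabled and tested, then the
 * handle is torn down again.
 *
 *	stmf_event_handle_t hdl;
 *	boolean_t deliver;
 *
 *	STMF_EVENT_ALLOC_HANDLE(hdl);
 *	STMF_EVENT_ADD(hdl, some_event_id);
 *	deliver = STMF_EVENT_ENABLED(hdl, some_event_id);
 *	STMF_EVENT_REMOVE(hdl, some_event_id);
 *	STMF_EVENT_FREE_HANDLE(hdl);
 */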

#define	STMF_TGT_NAME_LEN	256
#define	STMF_GUID_INPUT		32

#define	STMF_UPDATE_KSTAT_IO(kip, dbuf)	\
	do { \
		if ((dbuf)->db_flags & DB_DIRECTION_TO_RPORT) { \
			(kip)->reads++; \
			(kip)->nread += (dbuf)->db_data_size; \
		} else { \
			(kip)->writes++; \
			(kip)->nwritten += (dbuf)->db_data_size; \
		} \
	} while (0)
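
/*
 * Illustrative sketch of how STMF_UPDATE_KSTAT_IO is meant to be used:
 * when a data buffer transfer completes, the relevant kstat_io_t is
 * updated under its kstat lock.  The function below is hypothetical;
 * KSTAT_IO_PTR() comes from <sys/kstat.h>.
 *
 *	static void
 *	example_update_lu_io_stats(stmf_i_lu_t *ilu, stmf_data_buf_t *dbuf)
 *	{
 *		kstat_io_t *kip = KSTAT_IO_PTR(ilu->ilu_kstat_io);
 *
 *		mutex_enter(&ilu->ilu_kstat_lock);
 *		STMF_UPDATE_KSTAT_IO(kip, dbuf);
 *		mutex_exit(&ilu->ilu_kstat_lock);
 *	}
 */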

struct stmf_i_scsi_task;
struct stmf_itl_data;

typedef struct stmf_i_lu_provider {
	stmf_lu_provider_t	*ilp_lp;
	uint32_t		ilp_alloc_size;
	uint32_t		ilp_nlus;	/* # LUNs being exported */
	uint32_t		ilp_cb_in_progress:1,
				ilp_rsvd:31;
	struct stmf_i_lu_provider	*ilp_next;
	struct stmf_pp_data	*ilp_ppd;
} stmf_i_lu_provider_t;

typedef struct stmf_i_lu {
	stmf_lu_t	*ilu_lu;
	uint32_t	ilu_alloc_size;
	uint32_t	ilu_flags;
	uint32_t	ilu_ref_cnt;
	uint8_t		ilu_state;
	uint8_t		ilu_prev_state;
	uint8_t		ilu_access;
	uint8_t		ilu_alua;
	stmf_event_handle_t	ilu_event_hdl;
	struct stmf_i_lu	*ilu_next;
	struct stmf_i_lu	*ilu_prev;
	char		*ilu_alias;
	char		ilu_ascii_hex_guid[STMF_GUID_INPUT + 1];
	kmutex_t	ilu_task_lock;
	uint32_t	ilu_task_cntr1;
	uint32_t	ilu_task_cntr2;
	uint32_t	*ilu_cur_task_cntr;
	uint32_t	ilu_ntasks;	/* # of tasks in the ilu_task list */
	uint32_t	ilu_ntasks_free;	/* # of tasks that are free */
	uint32_t	ilu_ntasks_min_free;	/* # minimal free tasks */
	uint32_t	ilu_additional_ref;
	uint32_t	ilu_proxy_registered;
	uint64_t	ilu_reg_msgid;
	struct stmf_i_scsi_task	*ilu_tasks;
	struct stmf_i_scsi_task	*ilu_free_tasks;
	struct stmf_itl_data	*ilu_itl_list;
	kstat_t		*ilu_kstat_info;
	kstat_t		*ilu_kstat_io;
	kmutex_t	ilu_kstat_lock;
	kcondvar_t	ilu_offline_pending_cv;

	/* points to the luid entry in stmf_state.stmf_luid_list */
	void		*ilu_luid;
} stmf_i_lu_t;

/*
 * ilu_flags
 */
#define	ILU_STALL_DEREGISTER		0x0001
#define	ILU_RESET_ACTIVE		0x0002

typedef struct stmf_i_port_provider {
	stmf_port_provider_t	*ipp_pp;
	uint32_t		ipp_alloc_size;
	uint32_t		ipp_npps;
	uint32_t		ipp_cb_in_progress:1,
				ipp_rsvd:31;
	struct stmf_i_port_provider	*ipp_next;
	struct stmf_pp_data	*ipp_ppd;
} stmf_i_port_provider_t;

#define	MAX_ILPORT		0x10000

typedef struct stmf_i_local_port {
	stmf_local_port_t	*ilport_lport;
	uint32_t		ilport_alloc_size;
	uint32_t		ilport_nsessions;
	struct stmf_i_scsi_session	*ilport_ss_list;
	krwlock_t		ilport_lock;
	struct stmf_i_local_port	*ilport_next;
	struct stmf_i_local_port	*ilport_prev;
	uint8_t			ilport_state;
	uint8_t			ilport_prev_state;
	uint8_t			ilport_standby;
	uint8_t			ilport_alua;
	uint16_t		ilport_rtpid;	/* relative tpid */
	uint16_t		ilport_proxy_registered;
	uint64_t		ilport_reg_msgid;
	uint8_t			ilport_no_standby_lu;
	uint32_t		ilport_unexpected_comp;
	stmf_event_handle_t	ilport_event_hdl;
	clock_t			ilport_last_online_clock;
	clock_t			ilport_avg_interval;
	uint32_t		ilport_online_times;
	uint32_t		ilport_flags;
	kstat_t			*ilport_kstat_info;
	kstat_t			*ilport_kstat_io;
	kmutex_t		ilport_kstat_lock;
	char			ilport_kstat_tgt_name[STMF_TGT_NAME_LEN];
	/* which target group this port belongs to in stmf_state.stmf_tg_list */
	void			*ilport_tg;
	id_t			ilport_instance;
	/* XXX Need something to track all the remote ports also */
} stmf_i_local_port_t;

#define	STMF_AVG_ONLINE_INTERVAL	(30 * drv_usectohz(1000000))

#define	MAX_IRPORT		0x10000

typedef struct stmf_i_remote_port {
	struct scsi_devid_desc	*irport_id;
	kmutex_t		irport_mutex;
	int			irport_refcnt;
	id_t			irport_instance;
	avl_node_t		irport_ln;
	/* number of active read tasks */
	uint32_t		irport_nread_tasks;
	/* number of active write tasks */
	uint32_t		irport_nwrite_tasks;
	hrtime_t		irport_rdstart_timestamp;
	hrtime_t		irport_rddone_timestamp;
	hrtime_t		irport_wrstart_timestamp;
	hrtime_t		irport_wrdone_timestamp;
	kstat_t			*irport_kstat_info;
	kstat_t			*irport_kstat_io;
	kstat_t			*irport_kstat_estat;	/* extended stats */
	boolean_t		irport_info_dirty;
} stmf_i_remote_port_t;

/*
 * ilport flags
 */
#define	ILPORT_FORCED_OFFLINE		0x01
#define	ILPORT_SS_GOT_INITIAL_LUNS	0x02

typedef struct stmf_i_scsi_session {
	stmf_scsi_session_t	*iss_ss;
	uint32_t		iss_alloc_size;
	uint32_t		iss_flags;
	stmf_i_remote_port_t	*iss_irport;
	struct stmf_i_scsi_session	*iss_next;
	/*
	 * Ideally we should maintain 2 maps. One would indicate a new map
	 * which will become available only upon receipt of a REPORT LUN
	 * cmd.
	 */
	struct stmf_lun_map	*iss_sm;
	/*
	 * which host group the host of this session belongs to in
	 * stmf_state.stmf_hg_list
	 */
	void			*iss_hg;
	krwlock_t		*iss_lockp;
	time_t			iss_creation_time;
} stmf_i_scsi_session_t;

/*
 * iss flags
 */
#define	ISS_LUN_INVENTORY_CHANGED	0x0001
#define	ISS_RESET_ACTIVE		0x0002
#define	ISS_BEING_CREATED		0x0004
#define	ISS_GOT_INITIAL_LUNS		0x0008
#define	ISS_EVENT_ACTIVE		0x0010
#define	ISS_NULL_TPTID			0x0020

#define	ITASK_MAX_NCMDS			14
#define	ITASK_DEFAULT_POLL_TIMEOUT	0

#define	ITASK_TASK_AUDIT_DEPTH		32	/* Must be a power of 2 */

typedef enum {
	TE_UNDEFINED,
	TE_TASK_START,
	TE_XFER_START,
	TE_XFER_DONE,
	TE_SEND_STATUS,
	TE_SEND_STATUS_DONE,
	TE_TASK_FREE,
	TE_TASK_ABORT,
	TE_TASK_LPORT_ABORTED,
	TE_TASK_LU_ABORTED,
	TE_PROCESS_CMD
} task_audit_event_t;

#define	CMD_OR_IOF_NA			0xffffffff

typedef struct stmf_task_audit_rec {
	task_audit_event_t	ta_event;
	uint32_t		ta_cmd_or_iof;
	uint32_t		ta_itask_flags;
	stmf_data_buf_t		*ta_dbuf;
	timespec_t		ta_timestamp;
} stmf_task_audit_rec_t;

struct stmf_worker;
typedef struct stmf_i_scsi_task {
	scsi_task_t		*itask_task;
	uint32_t		itask_alloc_size;
	uint32_t		itask_flags;
	kmutex_t		itask_mutex;	/* protects flags and lists */
	uint64_t		itask_proxy_msg_id;
	stmf_data_buf_t		*itask_proxy_dbuf;
	struct stmf_worker	*itask_worker;
	uint32_t		*itask_ilu_task_cntr;
	struct stmf_i_scsi_task	*itask_worker_next;
	struct stmf_i_scsi_task	*itask_lu_next;
	struct stmf_i_scsi_task	*itask_lu_prev;
	struct stmf_i_scsi_task	*itask_lu_free_next;
	struct stmf_itl_data	*itask_itl_datap;
	clock_t			itask_start_time;	/* abort and normal */
	/* For now we only support 4 parallel buffers. Should be enough. */
	stmf_data_buf_t		*itask_dbufs[4];
	clock_t			itask_poll_timeout;
	uint8_t			itask_cmd_stack[ITASK_MAX_NCMDS];
	uint8_t			itask_ncmds;
	uint8_t			itask_allocated_buf_map;
	uint16_t		itask_cdb_buf_size;

	/* Task profile data */
	hrtime_t		itask_start_timestamp;
	hrtime_t		itask_done_timestamp;
	hrtime_t		itask_xfer_done_timestamp;
	hrtime_t		itask_waitq_enter_timestamp;
	hrtime_t		itask_waitq_time;
	hrtime_t		itask_lu_read_time;
	hrtime_t		itask_lu_write_time;
	hrtime_t		itask_lport_read_time;
	hrtime_t		itask_lport_write_time;
	uint64_t		itask_read_xfer;
	uint64_t		itask_write_xfer;
	kmutex_t		itask_audit_mutex;
	uint8_t			itask_audit_index;
	stmf_task_audit_rec_t	itask_audit_records[ITASK_TASK_AUDIT_DEPTH];
} stmf_i_scsi_task_t;
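
/*
 * Illustrative sketch of how the audit ring in stmf_i_scsi_task_t is
 * typically filled: itask_audit_index advances and is masked with
 * ITASK_TASK_AUDIT_DEPTH - 1 (hence the power-of-2 requirement) under
 * itask_audit_mutex.  The helper name below is hypothetical.
 *
 *	static void
 *	example_task_audit(stmf_i_scsi_task_t *itask, task_audit_event_t te,
 *	    uint32_t cmd_or_iof, stmf_data_buf_t *dbuf)
 *	{
 *		stmf_task_audit_rec_t *ar;
 *
 *		mutex_enter(&itask->itask_audit_mutex);
 *		ar = &itask->itask_audit_records[itask->itask_audit_index++ &
 *		    (ITASK_TASK_AUDIT_DEPTH - 1)];
 *		ar->ta_event = te;
 *		ar->ta_cmd_or_iof = cmd_or_iof;
 *		ar->ta_itask_flags = itask->itask_flags;
 *		ar->ta_dbuf = dbuf;
 *		gethrestime(&ar->ta_timestamp);
 *		mutex_exit(&itask->itask_audit_mutex);
 *	}
 */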

#define	ITASK_DEFAULT_ABORT_TIMEOUT	5

/*
 * Common code to enqueue an itask onto the worker_task queue is placed
 * in this macro to simplify future maintenance activity.
 */
#define	STMF_ENQUEUE_ITASK(w, i) \
	ASSERT((i->itask_flags & ITASK_IN_FREE_LIST) == 0); \
	ASSERT(mutex_owned(&i->itask_mutex)); \
	ASSERT(mutex_owned(&w->worker_lock)); \
	i->itask_worker_next = NULL; \
	if (w->worker_task_tail) { \
		w->worker_task_tail->itask_worker_next = i; \
	} else { \
		w->worker_task_head = i; \
	} \
	w->worker_task_tail = i; \
	if (++(w->worker_queue_depth) > w->worker_max_qdepth_pu) { \
		w->worker_max_qdepth_pu = w->worker_queue_depth; \
	} \
	atomic_inc_32(&w->worker_ref_count); \
	atomic_or_32(&i->itask_flags, ITASK_IN_WORKER_QUEUE); \
	i->itask_waitq_enter_timestamp = gethrtime(); \
	if ((w->worker_flags & STMF_WORKER_ACTIVE) == 0) \
		cv_signal(&w->worker_cv);

#define	STMF_DEQUEUE_ITASK(w, itask) \
	ASSERT(mutex_owned(&w->worker_lock)); \
	if ((itask = w->worker_task_head) != NULL) { \
		w->worker_task_head = itask->itask_worker_next; \
		if (w->worker_task_head == NULL) { \
			w->worker_task_tail = NULL; \
		} \
	} else { \
		w->worker_task_tail = NULL; \
	}
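
/*
 * Illustrative sketch of how the two macros above combine in a worker
 * loop.  A real worker additionally has to clear ITASK_IN_WORKER_QUEUE
 * and balance worker_queue_depth and worker_ref_count, which the enqueue
 * macro bumps; the function and helper names below are hypothetical.
 *
 *	static void
 *	example_worker_loop(stmf_worker_t *w)
 *	{
 *		stmf_i_scsi_task_t *itask;
 *
 *		mutex_enter(&w->worker_lock);
 *		while ((w->worker_flags & STMF_WORKER_TERMINATE) == 0) {
 *			STMF_DEQUEUE_ITASK(w, itask);
 *			if (itask == NULL) {
 *				cv_wait(&w->worker_cv, &w->worker_lock);
 *				continue;
 *			}
 *			example_process_itask(w, itask);
 *		}
 *		mutex_exit(&w->worker_lock);
 *	}
 */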

/*
 * itask_flags
 */
#define	ITASK_IN_FREE_LIST		0x0001
#define	ITASK_IN_TRANSITION		0x0002
#define	ITASK_IN_WORKER_QUEUE		0x0004
#define	ITASK_BEING_ABORTED		0x0008
#define	ITASK_BEING_COMPLETED		0x0010
#define	ITASK_KNOWN_TO_TGT_PORT		0x0020
#define	ITASK_KNOWN_TO_LU		0x0040
#define	ITASK_LU_ABORT_CALLED		0x0080
#define	ITASK_TGT_PORT_ABORT_CALLED	0x0100
#define	ITASK_DEFAULT_HANDLING		0x0200
#define	ITASK_CAUSING_LU_RESET		0x0400
#define	ITASK_CAUSING_TARGET_RESET	0x0800
#define	ITASK_KSTAT_IN_RUNQ		0x1000
#define	ITASK_PROXY_TASK		0x2000

/*
 * itask cmds.
 */
#define	ITASK_CMD_MASK			0x1F
#define	ITASK_CMD_BUF_NDX(cmd)		(((uint8_t)(cmd)) >> 5)
#define	ITASK_CMD_NEW_TASK		0x1
#define	ITASK_CMD_DATA_XFER_DONE	0x2
#define	ITASK_CMD_STATUS_DONE		0x3
#define	ITASK_CMD_ABORT			0x4
#define	ITASK_CMD_SEND_STATUS		0x5
#define	ITASK_CMD_POLL			0x10
#define	ITASK_CMD_POLL_LU		(ITASK_CMD_POLL | 1)
#define	ITASK_CMD_POLL_LPORT		(ITASK_CMD_POLL | 2)
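
/*
 * Illustrative sketch of the command encoding implied by the macros
 * above: the low five bits (ITASK_CMD_MASK) carry the command itself,
 * while ITASK_CMD_BUF_NDX() recovers a data buffer index from the upper
 * three bits.  The snippet below encodes "data transfer done" for dbuf
 * slot 2 and decodes it again; example_xfer_done() is hypothetical.
 *
 *	uint8_t cmd = ITASK_CMD_DATA_XFER_DONE | (2 << 5);
 *	uint8_t ndx = ITASK_CMD_BUF_NDX(cmd);
 *
 *	switch (cmd & ITASK_CMD_MASK) {
 *	case ITASK_CMD_DATA_XFER_DONE:
 *		example_xfer_done(itask->itask_dbufs[ndx]);
 *		break;
 *	}
 */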

/*
 * Structure maintained on a per-ITL basis when the LU registers an ITL
 * handle.
 */
typedef struct stmf_itl_data {
	uint32_t		itl_counter;
	uint8_t			itl_flags;
	uint8_t			itl_hdlrm_reason;
	uint16_t		itl_lun;
	void			*itl_handle;
	struct stmf_i_lu	*itl_ilu;
	struct stmf_i_scsi_session	*itl_session;
	struct stmf_itl_data	*itl_next;
} stmf_itl_data_t;

/*
 * itl flags
 */
#define	STMF_ITL_BEING_TERMINATED	0x01

/*
 * data structures to maintain provider private data.
 */
typedef struct stmf_pp_data {
	struct stmf_pp_data	*ppd_next;
	void			*ppd_provider;
	nvlist_t		*ppd_nv;
	uint32_t		ppd_lu_provider:1,
				ppd_port_provider:1,
				ppd_rsvd:30;
	uint32_t		ppd_alloc_size;
	uint64_t		ppd_token;
	char			ppd_name[8];
} stmf_pp_data_t;

typedef struct stmf_worker {
	kthread_t		*worker_tid;
	stmf_i_scsi_task_t	*worker_task_head;
	stmf_i_scsi_task_t	*worker_task_tail;
	stmf_i_scsi_task_t	*worker_wait_head;
	stmf_i_scsi_task_t	*worker_wait_tail;
	kmutex_t		worker_lock;
	kcondvar_t		worker_cv;
	uint32_t		worker_flags;
	uint32_t		worker_queue_depth;	/* ntasks cur queued */
	uint32_t		worker_max_qdepth_pu;	/* maxqd / unit time */
	uint32_t		worker_max_sys_qdepth_pu; /* for all workers */
	uint32_t		worker_ref_count;	/* # IOs referencing */
	hrtime_t		worker_signal_timestamp;
} stmf_worker_t;

/*
 * worker flags
 */
#define	STMF_WORKER_STARTED		1
#define	STMF_WORKER_ACTIVE		2
#define	STMF_WORKER_TERMINATE		4

/*
 * data struct for managing transfers.
 */
typedef struct stmf_xfer_data {
	uint32_t	alloc_size;	/* Including this struct */
	uint32_t	size_done;
	uint32_t	size_left;
	uint8_t		buf[4];
} stmf_xfer_data_t;
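
/*
 * Illustrative sketch of how an stmf_xfer_data_t is typically sized: the
 * trailing buf[] is extended to hold the data being staged, and
 * alloc_size records the whole allocation (struct header included) so it
 * can be freed later.  The payload length below is hypothetical.
 *
 *	uint32_t payload_len = 512;
 *	uint32_t asize = sizeof (stmf_xfer_data_t) - 4 + payload_len;
 *	stmf_xfer_data_t *xd = kmem_zalloc(asize, KM_SLEEP);
 *
 *	xd->alloc_size = asize;
 *	xd->size_left = payload_len;
 *	xd->size_done = 0;
 */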

/*
 * Define frequently used macros
 */
#define	TASK_TO_ITASK(x_task)	\
	((stmf_i_scsi_task_t *)(x_task)->task_stmf_private)
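
/*
 * Example use of TASK_TO_ITASK: given the scsi_task_t handed to a
 * provider entry point, recover the framework-private task state.  The
 * function below is a hypothetical sketch.
 *
 *	static hrtime_t
 *	example_task_start_time(scsi_task_t *task)
 *	{
 *		stmf_i_scsi_task_t *itask = TASK_TO_ITASK(task);
 *
 *		return (itask->itask_start_timestamp);
 *	}
 */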

void stmf_dlun_init();
stmf_status_t stmf_dlun_fini();
void stmf_worker_init();
stmf_status_t stmf_worker_fini();
void stmf_task_free(scsi_task_t *task);
void stmf_do_task_abort(scsi_task_t *task);
void stmf_do_itl_dereg(stmf_lu_t *lu, stmf_itl_data_t *itl,
    uint8_t hdlrm_reason);
void stmf_generate_lu_event(stmf_i_lu_t *ilu, int eventid,
    void *arg, uint32_t flags);
void stmf_generate_lport_event(stmf_i_local_port_t *ilport, int eventid,
    void *arg, uint32_t flags);

#ifdef	__cplusplus
}
#endif

#endif /* _STMF_IMPL_H */