Print this page
NEX-19373 SMB2 file ID re-use can confuse clients
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Rick McNeal <rick.mcneal@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
NEX-16519 Panic while running IOmeter to a pool through an SMB share
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Matt Barden <matt.barden@nexenta.com>
NEX-15578 SMB2 durable handle redesign
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
NEX-5665 SMB2 oplock leases
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-15578 SMB2 durable handle redesign
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
NEX-5665 SMB2 oplock leases
Reviewed by: Matt Barden <matt.barden@nexenta.com>
Reviewed by: Evan Layton <evan.layton@nexenta.com>
Reviewed by: Roman Strashkin <roman.strashkin@nexenta.com>
NEX-5599 SMB needs a pointer-based hash table for durable handles
Reviewed by: Gordon Ross <gwr@nexenta.com>
NEX-4458 Incorrect directory listing response for non-UNICODE clients
Reviewed by: Matt Barden <Matt.Barden@nexenta.com>
Reviewed by: Kevin Crowe <kevin.crowe@nexenta.com>
NEX-3787 Sync. up SMB server with: Merge with illumos-gate 12380e1e
NEX-3611 CLONE NEX-3550 Replace smb2_enable with max_protocol
Reviewed by: Yuri Pankov <Yuri.Pankov@nexenta.com>
NEX-2894 Using a date that is outside of the UNIX epoch fails on CIFS
NEX-2781 SMB2 credit handling needs work
SMB-11 SMB2 message parse & dispatch
SMB-12 SMB2 Negotiate Protocol
SMB-13 SMB2 Session Setup
SMB-14 SMB2 Logoff
SMB-15 SMB2 Tree Connect
SMB-16 SMB2 Tree Disconnect
SMB-17 SMB2 Create
SMB-18 SMB2 Close
SMB-19 SMB2 Flush
SMB-20 SMB2 Read
SMB-21 SMB2 Write
SMB-22 SMB2 Lock/Unlock
SMB-23 SMB2 Ioctl
SMB-24 SMB2 Cancel
SMB-25 SMB2 Echo
SMB-26 SMB2 Query Dir
SMB-27 SMB2 Change Notify
SMB-28 SMB2 Query Info
SMB-29 SMB2 Set Info
SMB-30 SMB2 Oplocks
SMB-53 SMB2 Create Context options
(SMB2 code review cleanup 1, 2, 3)
SMB-50 User-mode SMB server
Includes work by these authors:
Thomas Keiser <thomas.keiser@nexenta.com>
Albert Lee <trisk@nexenta.com>
SUP-694 panic on bad mutex in smb_event_wait() - nits
SUP-694 panic on bad mutex in smb_event_wait()
SMB-65 SMB server in non-global zones (data structure changes)
Many things move to the smb_server_t object, and
many functions gain an sv arg (which server).
SMB-65 SMB server in non-global zones (kmem_caches)
common kmem_cache instances across zones
separate GZ-only init from NGZ init
SMB-64 smbsrv workers run at excessively high priority
SMB-48 Panic with smbtorture raw.scan-eamax
re #6854 FindFirstFile,FindFirstFileEx,... are not working correctly on Nexenta CIFS-shares
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/fs/smbsrv/smb_kutil.c
+++ new/usr/src/uts/common/fs/smbsrv/smb_kutil.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
|
↓ open down ↓ |
13 lines elided |
↑ open up ↑ |
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2007, 2010, Oracle and/or its affiliates. All rights reserved.
24 - * Copyright 2015 Nexenta Systems, Inc. All rights reserved.
24 + * Copyright 2018 Nexenta Systems, Inc. All rights reserved.
25 25 */
26 26
27 27 #include <sys/param.h>
28 28 #include <sys/types.h>
29 29 #include <sys/tzfile.h>
30 30 #include <sys/atomic.h>
31 31 #include <sys/time.h>
32 32 #include <sys/spl.h>
33 33 #include <sys/random.h>
34 34 #include <smbsrv/smb_kproto.h>
35 35 #include <smbsrv/smb_fsops.h>
36 36 #include <smbsrv/smbinfo.h>
37 37 #include <smbsrv/smb_xdr.h>
38 38 #include <smbsrv/smb_vops.h>
39 39 #include <smbsrv/smb_idmap.h>
40 40
41 41 #include <sys/sid.h>
42 42 #include <sys/priv_names.h>
43 +#include <sys/bitmap.h>
43 44
44 45 static kmem_cache_t *smb_dtor_cache = NULL;
45 46
46 47 static boolean_t smb_avl_hold(smb_avl_t *);
47 48 static void smb_avl_rele(smb_avl_t *);
48 49
49 50 time_t tzh_leapcnt = 0;
50 51
51 52 struct tm
52 53 *smb_gmtime_r(time_t *clock, struct tm *result);
53 54
54 55 time_t
55 56 smb_timegm(struct tm *tm);
56 57
57 58 struct tm {
58 59 int tm_sec;
59 60 int tm_min;
60 61 int tm_hour;
61 62 int tm_mday;
62 63 int tm_mon;
|
↓ open down ↓ |
10 lines elided |
↑ open up ↑ |
63 64 int tm_year;
64 65 int tm_wday;
65 66 int tm_yday;
66 67 int tm_isdst;
67 68 };
68 69
69 70 static const int days_in_month[] = {
70 71 31, 28, 31, 30, 31, 30, 31, 31, 30, 31, 30, 31
71 72 };
72 73
74 +/*
75 + * Given a UTF-8 string (our internal form everywhere)
76 + * return either the Unicode (UTF-16) length in bytes,
77 + * or the OEM length in bytes. Which we return is
78 + * determined by whether the client supports Unicode.
79 + * This length does NOT include the null.
80 + */
73 81 int
74 82 smb_ascii_or_unicode_strlen(struct smb_request *sr, char *str)
75 83 {
76 84 if (sr->session->dialect >= SMB_VERS_2_BASE ||
77 85 (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
78 86 return (smb_wcequiv_strlen(str));
79 - return (strlen(str));
87 + return (smb_sbequiv_strlen(str));
80 88 }
81 89
90 +/*
91 + * Given a UTF-8 string (our internal form everywhere)
92 + * return either the Unicode (UTF-16) length in bytes,
93 + * or the OEM length in bytes. Which we return is
94 + * determined by whether the client supports Unicode.
95 + * This length DOES include the null.
96 + */
82 97 int
83 98 smb_ascii_or_unicode_strlen_null(struct smb_request *sr, char *str)
84 99 {
85 100 if (sr->session->dialect >= SMB_VERS_2_BASE ||
86 101 (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
87 102 return (smb_wcequiv_strlen(str) + 2);
88 - return (strlen(str) + 1);
103 + return (smb_sbequiv_strlen(str) + 1);
89 104 }
90 105
91 106 int
92 107 smb_ascii_or_unicode_null_len(struct smb_request *sr)
93 108 {
94 109 if (sr->session->dialect >= SMB_VERS_2_BASE ||
95 110 (sr->smb_flg2 & SMB_FLAGS2_UNICODE) != 0)
96 111 return (2);
97 112 return (1);
98 113 }
99 114
/*
 * Convert old-style (DOS, LanMan) wildcard strings to NT style.
 * This should ONLY happen to patterns that come from old clients,
 * meaning dialect LANMAN2_1 etc. (dialect < NT_LM_0_12).
 *
 * ? is converted to >
 * * is converted to < if it is followed by .
 * . is converted to " if it is followed by ? or * or end of pattern
 *
 * Note: modifies pattern in place.
 */
void
smb_convert_wildcards(char *pattern)
{
	char *cp;

	for (cp = pattern; *cp != '\0'; cp++) {
		if (*cp == '?') {
			*cp = '>';
		} else if (*cp == '*') {
			if (cp[1] == '.')
				*cp = '<';
		} else if (*cp == '.') {
			if (cp[1] == '?' || cp[1] == '*' || cp[1] == '\0')
				*cp = '\"';
		}
	}
}
133 148
134 149 /*
135 150 * smb_sattr_check
136 151 *
137 152 * Check file attributes against a search attribute (sattr) mask.
138 153 *
139 154 * Normal files, which includes READONLY and ARCHIVE, always pass
140 155 * this check. If the DIRECTORY, HIDDEN or SYSTEM special attributes
141 156 * are set then they must appear in the search mask. The special
142 157 * attributes are inclusive, i.e. all special attributes that appear
143 158 * in sattr must also appear in the file attributes for the check to
144 159 * pass.
145 160 *
146 161 * The following examples show how this works:
147 162 *
148 163 * fileA: READONLY
149 164 * fileB: 0 (no attributes = normal file)
150 165 * fileC: READONLY, ARCHIVE
151 166 * fileD: HIDDEN
152 167 * fileE: READONLY, HIDDEN, SYSTEM
153 168 * dirA: DIRECTORY
154 169 *
155 170 * search attribute: 0
156 171 * Returns: fileA, fileB and fileC.
157 172 * search attribute: HIDDEN
158 173 * Returns: fileA, fileB, fileC and fileD.
159 174 * search attribute: SYSTEM
160 175 * Returns: fileA, fileB and fileC.
161 176 * search attribute: DIRECTORY
162 177 * Returns: fileA, fileB, fileC and dirA.
163 178 * search attribute: HIDDEN and SYSTEM
164 179 * Returns: fileA, fileB, fileC, fileD and fileE.
165 180 *
166 181 * Returns true if the file and sattr match; otherwise, returns false.
167 182 */
168 183 boolean_t
169 184 smb_sattr_check(uint16_t dosattr, uint16_t sattr)
170 185 {
171 186 if ((dosattr & FILE_ATTRIBUTE_DIRECTORY) &&
172 187 !(sattr & FILE_ATTRIBUTE_DIRECTORY))
173 188 return (B_FALSE);
174 189
175 190 if ((dosattr & FILE_ATTRIBUTE_HIDDEN) &&
176 191 !(sattr & FILE_ATTRIBUTE_HIDDEN))
177 192 return (B_FALSE);
178 193
179 194 if ((dosattr & FILE_ATTRIBUTE_SYSTEM) &&
180 195 !(sattr & FILE_ATTRIBUTE_SYSTEM))
181 196 return (B_FALSE);
182 197
183 198 return (B_TRUE);
184 199 }
185 200
186 201 time_t
187 202 smb_get_boottime(void)
188 203 {
189 204 extern time_t boot_time;
190 205 zone_t *z = curzone;
191 206
192 207 /* Unfortunately, the GZ doesn't set zone_boot_time. */
193 208 if (z->zone_id == GLOBAL_ZONEID)
194 209 return (boot_time);
195 210
196 211 return (z->zone_boot_time);
197 212 }
198 213
199 214 /*
200 215 * smb_idpool_increment
201 216 *
202 217 * This function increments the ID pool by doubling the current size. This
203 218 * function assumes the caller entered the mutex of the pool.
204 219 */
205 220 static int
206 221 smb_idpool_increment(
207 222 smb_idpool_t *pool)
208 223 {
209 224 uint8_t *new_pool;
210 225 uint32_t new_size;
211 226
212 227 ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
213 228
214 229 new_size = pool->id_size * 2;
215 230 if (new_size <= SMB_IDPOOL_MAX_SIZE) {
216 231 new_pool = kmem_alloc(new_size / 8, KM_NOSLEEP);
217 232 if (new_pool) {
218 233 bzero(new_pool, new_size / 8);
219 234 bcopy(pool->id_pool, new_pool, pool->id_size / 8);
220 235 kmem_free(pool->id_pool, pool->id_size / 8);
221 236 pool->id_pool = new_pool;
222 237 pool->id_free_counter += new_size - pool->id_size;
223 238 pool->id_max_free_counter += new_size - pool->id_size;
224 239 pool->id_size = new_size;
225 240 pool->id_idx_msk = (new_size / 8) - 1;
226 241 if (new_size >= SMB_IDPOOL_MAX_SIZE) {
227 242 /* id -1 made unavailable */
228 243 pool->id_pool[pool->id_idx_msk] = 0x80;
229 244 pool->id_free_counter--;
230 245 pool->id_max_free_counter--;
231 246 }
232 247 return (0);
233 248 }
234 249 }
235 250 return (-1);
236 251 }
237 252
238 253 /*
239 254 * smb_idpool_constructor
240 255 *
241 256 * This function initializes the pool structure provided.
242 257 */
243 258 int
244 259 smb_idpool_constructor(
245 260 smb_idpool_t *pool)
246 261 {
247 262
248 263 ASSERT(pool->id_magic != SMB_IDPOOL_MAGIC);
249 264
250 265 pool->id_size = SMB_IDPOOL_MIN_SIZE;
251 266 pool->id_idx_msk = (SMB_IDPOOL_MIN_SIZE / 8) - 1;
252 267 pool->id_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
253 268 pool->id_max_free_counter = SMB_IDPOOL_MIN_SIZE - 1;
254 269 pool->id_bit = 0x02;
255 270 pool->id_bit_idx = 1;
256 271 pool->id_idx = 0;
257 272 pool->id_pool = (uint8_t *)kmem_alloc((SMB_IDPOOL_MIN_SIZE / 8),
258 273 KM_SLEEP);
259 274 bzero(pool->id_pool, (SMB_IDPOOL_MIN_SIZE / 8));
260 275 /* -1 id made unavailable */
261 276 pool->id_pool[0] = 0x01; /* id 0 made unavailable */
262 277 mutex_init(&pool->id_mutex, NULL, MUTEX_DEFAULT, NULL);
263 278 pool->id_magic = SMB_IDPOOL_MAGIC;
264 279 return (0);
265 280 }
266 281
267 282 /*
268 283 * smb_idpool_destructor
269 284 *
270 285 * This function tears down and frees the resources associated with the
271 286 * pool provided.
272 287 */
273 288 void
274 289 smb_idpool_destructor(
275 290 smb_idpool_t *pool)
276 291 {
277 292 ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
278 293 ASSERT(pool->id_free_counter == pool->id_max_free_counter);
279 294 pool->id_magic = (uint32_t)~SMB_IDPOOL_MAGIC;
280 295 mutex_destroy(&pool->id_mutex);
281 296 kmem_free(pool->id_pool, (size_t)(pool->id_size / 8));
282 297 }
283 298
284 299 /*
285 300 * smb_idpool_alloc
286 301 *
287 302 * This function allocates an ID from the pool provided.
288 303 */
289 304 int
290 305 smb_idpool_alloc(
291 306 smb_idpool_t *pool,
292 307 uint16_t *id)
293 308 {
294 309 uint32_t i;
295 310 uint8_t bit;
296 311 uint8_t bit_idx;
297 312 uint8_t byte;
298 313
299 314 ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
300 315
301 316 mutex_enter(&pool->id_mutex);
302 317 if ((pool->id_free_counter == 0) && smb_idpool_increment(pool)) {
303 318 mutex_exit(&pool->id_mutex);
304 319 return (-1);
305 320 }
306 321
307 322 i = pool->id_size;
308 323 while (i) {
309 324 bit = pool->id_bit;
310 325 bit_idx = pool->id_bit_idx;
|
↓ open down ↓ |
212 lines elided |
↑ open up ↑ |
311 326 byte = pool->id_pool[pool->id_idx];
312 327 while (bit) {
313 328 if (byte & bit) {
314 329 bit = bit << 1;
315 330 bit_idx++;
316 331 continue;
317 332 }
318 333 pool->id_pool[pool->id_idx] |= bit;
319 334 *id = (uint16_t)(pool->id_idx * 8 + (uint32_t)bit_idx);
320 335 pool->id_free_counter--;
321 - pool->id_bit = bit;
322 - pool->id_bit_idx = bit_idx;
336 + /*
337 + * Leave position at next bit to allocate,
338 + * so we don't keep re-using the last in an
339 + * alloc/free/alloc/free sequence. Doing
340 + * that can confuse some SMB clients.
341 + */
342 + if (bit & 0x80) {
343 + pool->id_bit = 1;
344 + pool->id_bit_idx = 0;
345 + pool->id_idx++;
346 + pool->id_idx &= pool->id_idx_msk;
347 + } else {
348 + pool->id_bit = (bit << 1);
349 + pool->id_bit_idx = bit_idx + 1;
350 + /* keep id_idx */
351 + }
323 352 mutex_exit(&pool->id_mutex);
324 353 return (0);
325 354 }
326 355 pool->id_bit = 1;
327 356 pool->id_bit_idx = 0;
328 357 pool->id_idx++;
329 358 pool->id_idx &= pool->id_idx_msk;
330 359 --i;
331 360 }
332 361 /*
333 362 * This section of code shouldn't be reached. If there are IDs
334 363 * available and none could be found there's a problem.
335 364 */
336 365 ASSERT(0);
337 366 mutex_exit(&pool->id_mutex);
338 367 return (-1);
339 368 }
340 369
341 370 /*
342 371 * smb_idpool_free
343 372 *
344 373 * This function frees the ID provided.
345 374 */
346 375 void
347 376 smb_idpool_free(
348 377 smb_idpool_t *pool,
349 378 uint16_t id)
350 379 {
351 380 ASSERT(pool->id_magic == SMB_IDPOOL_MAGIC);
352 381 ASSERT(id != 0);
353 382 ASSERT(id != 0xFFFF);
354 383
355 384 mutex_enter(&pool->id_mutex);
356 385 if (pool->id_pool[id >> 3] & (1 << (id & 7))) {
357 386 pool->id_pool[id >> 3] &= ~(1 << (id & 7));
358 387 pool->id_free_counter++;
359 388 ASSERT(pool->id_free_counter <= pool->id_max_free_counter);
360 389 mutex_exit(&pool->id_mutex);
361 390 return;
362 391 }
363 392 /* Freeing a free ID. */
364 393 ASSERT(0);
365 394 mutex_exit(&pool->id_mutex);
366 395 }
367 396
368 397 /*
369 398 * Initialize the llist delete queue object cache.
370 399 */
371 400 void
372 401 smb_llist_init(void)
373 402 {
374 403 if (smb_dtor_cache != NULL)
375 404 return;
376 405
377 406 smb_dtor_cache = kmem_cache_create("smb_dtor_cache",
378 407 sizeof (smb_dtor_t), 8, NULL, NULL, NULL, NULL, NULL, 0);
379 408 }
380 409
381 410 /*
382 411 * Destroy the llist delete queue object cache.
383 412 */
384 413 void
385 414 smb_llist_fini(void)
386 415 {
387 416 if (smb_dtor_cache != NULL) {
388 417 kmem_cache_destroy(smb_dtor_cache);
389 418 smb_dtor_cache = NULL;
390 419 }
391 420 }
392 421
393 422 /*
394 423 * smb_llist_constructor
395 424 *
396 425 * This function initializes a locked list.
397 426 */
398 427 void
399 428 smb_llist_constructor(
400 429 smb_llist_t *ll,
401 430 size_t size,
402 431 size_t offset)
403 432 {
404 433 rw_init(&ll->ll_lock, NULL, RW_DEFAULT, NULL);
405 434 mutex_init(&ll->ll_mutex, NULL, MUTEX_DEFAULT, NULL);
406 435 list_create(&ll->ll_list, size, offset);
407 436 list_create(&ll->ll_deleteq, sizeof (smb_dtor_t),
408 437 offsetof(smb_dtor_t, dt_lnd));
409 438 ll->ll_count = 0;
410 439 ll->ll_wrop = 0;
411 440 ll->ll_deleteq_count = 0;
412 441 ll->ll_flushing = B_FALSE;
413 442 }
414 443
415 444 /*
416 445 * Flush the delete queue and destroy a locked list.
417 446 */
418 447 void
419 448 smb_llist_destructor(
420 449 smb_llist_t *ll)
421 450 {
422 451 smb_llist_flush(ll);
423 452
424 453 ASSERT(ll->ll_count == 0);
425 454 ASSERT(ll->ll_deleteq_count == 0);
426 455
427 456 rw_destroy(&ll->ll_lock);
428 457 list_destroy(&ll->ll_list);
429 458 list_destroy(&ll->ll_deleteq);
430 459 mutex_destroy(&ll->ll_mutex);
431 460 }
432 461
433 462 /*
434 463 * Post an object to the delete queue. The delete queue will be processed
435 464 * during list exit or list destruction. Objects are often posted for
436 465 * deletion during list iteration (while the list is locked) but that is
437 466 * not required, and an object can be posted at any time.
438 467 */
439 468 void
440 469 smb_llist_post(smb_llist_t *ll, void *object, smb_dtorproc_t dtorproc)
441 470 {
442 471 smb_dtor_t *dtor;
443 472
444 473 ASSERT((object != NULL) && (dtorproc != NULL));
445 474
446 475 dtor = kmem_cache_alloc(smb_dtor_cache, KM_SLEEP);
447 476 bzero(dtor, sizeof (smb_dtor_t));
|
↓ open down ↓ |
115 lines elided |
↑ open up ↑ |
448 477 dtor->dt_magic = SMB_DTOR_MAGIC;
449 478 dtor->dt_object = object;
450 479 dtor->dt_proc = dtorproc;
451 480
452 481 mutex_enter(&ll->ll_mutex);
453 482 list_insert_tail(&ll->ll_deleteq, dtor);
454 483 ++ll->ll_deleteq_count;
455 484 mutex_exit(&ll->ll_mutex);
456 485 }
457 486
487 +void
488 +smb_llist_enter(smb_llist_t *ll, krw_t mode)
489 +{
490 + rw_enter(&ll->ll_lock, mode);
491 +}
492 +
458 493 /*
459 494 * Exit the list lock and process the delete queue.
460 495 */
461 496 void
462 497 smb_llist_exit(smb_llist_t *ll)
463 498 {
464 499 rw_exit(&ll->ll_lock);
465 500 smb_llist_flush(ll);
466 501 }
467 502
468 503 /*
469 504 * Flush the list delete queue. The mutex is dropped across the destructor
470 505 * call in case this leads to additional objects being posted to the delete
471 506 * queue.
472 507 */
473 508 void
474 509 smb_llist_flush(smb_llist_t *ll)
475 510 {
476 511 smb_dtor_t *dtor;
477 512
478 513 mutex_enter(&ll->ll_mutex);
479 514 if (ll->ll_flushing) {
480 515 mutex_exit(&ll->ll_mutex);
481 516 return;
482 517 }
483 518 ll->ll_flushing = B_TRUE;
484 519
485 520 dtor = list_head(&ll->ll_deleteq);
486 521 while (dtor != NULL) {
487 522 SMB_DTOR_VALID(dtor);
488 523 ASSERT((dtor->dt_object != NULL) && (dtor->dt_proc != NULL));
489 524 list_remove(&ll->ll_deleteq, dtor);
490 525 --ll->ll_deleteq_count;
491 526 mutex_exit(&ll->ll_mutex);
492 527
493 528 dtor->dt_proc(dtor->dt_object);
494 529
495 530 dtor->dt_magic = (uint32_t)~SMB_DTOR_MAGIC;
496 531 kmem_cache_free(smb_dtor_cache, dtor);
497 532 mutex_enter(&ll->ll_mutex);
498 533 dtor = list_head(&ll->ll_deleteq);
499 534 }
500 535 ll->ll_flushing = B_FALSE;
501 536
502 537 mutex_exit(&ll->ll_mutex);
503 538 }
504 539
505 540 /*
506 541 * smb_llist_upgrade
507 542 *
508 543 * This function tries to upgrade the lock of the locked list. It assumes the
509 544 * locked has already been entered in RW_READER mode. It first tries using the
510 545 * Solaris function rw_tryupgrade(). If that call fails the lock is released
511 546 * and reentered in RW_WRITER mode. In that last case a window is opened during
512 547 * which the contents of the list may have changed. The return code indicates
513 548 * whether or not the list was modified when the lock was exited.
514 549 */
515 550 int smb_llist_upgrade(
516 551 smb_llist_t *ll)
517 552 {
518 553 uint64_t wrop;
519 554
520 555 if (rw_tryupgrade(&ll->ll_lock) != 0) {
521 556 return (0);
522 557 }
523 558 wrop = ll->ll_wrop;
524 559 rw_exit(&ll->ll_lock);
525 560 rw_enter(&ll->ll_lock, RW_WRITER);
526 561 return (wrop != ll->ll_wrop);
527 562 }
528 563
529 564 /*
530 565 * smb_llist_insert_head
531 566 *
532 567 * This function inserts the object passed a the beginning of the list. This
533 568 * function assumes the lock of the list has already been entered.
534 569 */
535 570 void
536 571 smb_llist_insert_head(
537 572 smb_llist_t *ll,
538 573 void *obj)
539 574 {
540 575 list_insert_head(&ll->ll_list, obj);
541 576 ++ll->ll_wrop;
542 577 ++ll->ll_count;
543 578 }
544 579
545 580 /*
546 581 * smb_llist_insert_tail
547 582 *
548 583 * This function appends to the object passed to the list. This function assumes
549 584 * the lock of the list has already been entered.
550 585 *
551 586 */
552 587 void
553 588 smb_llist_insert_tail(
554 589 smb_llist_t *ll,
555 590 void *obj)
556 591 {
557 592 list_insert_tail(&ll->ll_list, obj);
558 593 ++ll->ll_wrop;
559 594 ++ll->ll_count;
560 595 }
561 596
562 597 /*
563 598 * smb_llist_remove
564 599 *
565 600 * This function removes the object passed from the list. This function assumes
566 601 * the lock of the list has already been entered.
567 602 */
568 603 void
569 604 smb_llist_remove(
570 605 smb_llist_t *ll,
571 606 void *obj)
572 607 {
573 608 list_remove(&ll->ll_list, obj);
574 609 ++ll->ll_wrop;
575 610 --ll->ll_count;
576 611 }
577 612
578 613 /*
579 614 * smb_llist_get_count
580 615 *
581 616 * This function returns the number of elements in the specified list.
582 617 */
583 618 uint32_t
584 619 smb_llist_get_count(
585 620 smb_llist_t *ll)
586 621 {
587 622 return (ll->ll_count);
588 623 }
589 624
590 625 /*
591 626 * smb_slist_constructor
592 627 *
593 628 * Synchronized list constructor.
594 629 */
595 630 void
596 631 smb_slist_constructor(
597 632 smb_slist_t *sl,
598 633 size_t size,
599 634 size_t offset)
600 635 {
601 636 mutex_init(&sl->sl_mutex, NULL, MUTEX_DEFAULT, NULL);
602 637 cv_init(&sl->sl_cv, NULL, CV_DEFAULT, NULL);
603 638 list_create(&sl->sl_list, size, offset);
604 639 sl->sl_count = 0;
605 640 sl->sl_waiting = B_FALSE;
606 641 }
607 642
608 643 /*
609 644 * smb_slist_destructor
610 645 *
611 646 * Synchronized list destructor.
612 647 */
613 648 void
614 649 smb_slist_destructor(
615 650 smb_slist_t *sl)
616 651 {
617 652 VERIFY(sl->sl_count == 0);
618 653
619 654 mutex_destroy(&sl->sl_mutex);
620 655 cv_destroy(&sl->sl_cv);
621 656 list_destroy(&sl->sl_list);
622 657 }
623 658
624 659 /*
625 660 * smb_slist_insert_head
626 661 *
627 662 * This function inserts the object passed a the beginning of the list.
628 663 */
629 664 void
630 665 smb_slist_insert_head(
631 666 smb_slist_t *sl,
632 667 void *obj)
633 668 {
634 669 mutex_enter(&sl->sl_mutex);
635 670 list_insert_head(&sl->sl_list, obj);
636 671 ++sl->sl_count;
637 672 mutex_exit(&sl->sl_mutex);
638 673 }
639 674
640 675 /*
641 676 * smb_slist_insert_tail
642 677 *
643 678 * This function appends the object passed to the list.
644 679 */
645 680 void
646 681 smb_slist_insert_tail(
647 682 smb_slist_t *sl,
648 683 void *obj)
649 684 {
650 685 mutex_enter(&sl->sl_mutex);
651 686 list_insert_tail(&sl->sl_list, obj);
652 687 ++sl->sl_count;
653 688 mutex_exit(&sl->sl_mutex);
654 689 }
655 690
656 691 /*
657 692 * smb_llist_remove
658 693 *
659 694 * This function removes the object passed by the caller from the list.
660 695 */
661 696 void
662 697 smb_slist_remove(
663 698 smb_slist_t *sl,
664 699 void *obj)
665 700 {
666 701 mutex_enter(&sl->sl_mutex);
667 702 list_remove(&sl->sl_list, obj);
668 703 if ((--sl->sl_count == 0) && (sl->sl_waiting)) {
669 704 sl->sl_waiting = B_FALSE;
670 705 cv_broadcast(&sl->sl_cv);
671 706 }
672 707 mutex_exit(&sl->sl_mutex);
673 708 }
674 709
675 710 /*
676 711 * smb_slist_move_tail
677 712 *
678 713 * This function transfers all the contents of the synchronized list to the
679 714 * list_t provided. It returns the number of objects transferred.
680 715 */
681 716 uint32_t
682 717 smb_slist_move_tail(
683 718 list_t *lst,
684 719 smb_slist_t *sl)
685 720 {
686 721 uint32_t rv;
687 722
688 723 mutex_enter(&sl->sl_mutex);
689 724 rv = sl->sl_count;
690 725 if (sl->sl_count) {
691 726 list_move_tail(lst, &sl->sl_list);
692 727 sl->sl_count = 0;
693 728 if (sl->sl_waiting) {
694 729 sl->sl_waiting = B_FALSE;
695 730 cv_broadcast(&sl->sl_cv);
696 731 }
697 732 }
698 733 mutex_exit(&sl->sl_mutex);
699 734 return (rv);
700 735 }
701 736
702 737 /*
703 738 * smb_slist_obj_move
704 739 *
705 740 * This function moves an object from one list to the end of the other list. It
706 741 * assumes the mutex of each list has been entered.
707 742 */
708 743 void
709 744 smb_slist_obj_move(
710 745 smb_slist_t *dst,
711 746 smb_slist_t *src,
712 747 void *obj)
713 748 {
714 749 ASSERT(dst->sl_list.list_offset == src->sl_list.list_offset);
715 750 ASSERT(dst->sl_list.list_size == src->sl_list.list_size);
716 751
717 752 list_remove(&src->sl_list, obj);
718 753 list_insert_tail(&dst->sl_list, obj);
719 754 dst->sl_count++;
720 755 src->sl_count--;
721 756 if ((src->sl_count == 0) && (src->sl_waiting)) {
722 757 src->sl_waiting = B_FALSE;
723 758 cv_broadcast(&src->sl_cv);
724 759 }
725 760 }
726 761
727 762 /*
728 763 * smb_slist_wait_for_empty
729 764 *
730 765 * This function waits for a list to be emptied.
731 766 */
732 767 void
733 768 smb_slist_wait_for_empty(
734 769 smb_slist_t *sl)
735 770 {
736 771 mutex_enter(&sl->sl_mutex);
737 772 while (sl->sl_count) {
738 773 sl->sl_waiting = B_TRUE;
739 774 cv_wait(&sl->sl_cv, &sl->sl_mutex);
740 775 }
741 776 mutex_exit(&sl->sl_mutex);
742 777 }
743 778
744 779 /*
745 780 * smb_slist_exit
746 781 *
747 782 * This function exits the muetx of the list and signal the condition variable
748 783 * if the list is empty.
749 784 */
750 785 void
751 786 smb_slist_exit(smb_slist_t *sl)
752 787 {
753 788 if ((sl->sl_count == 0) && (sl->sl_waiting)) {
754 789 sl->sl_waiting = B_FALSE;
755 790 cv_broadcast(&sl->sl_cv);
756 791 }
757 792 mutex_exit(&sl->sl_mutex);
758 793 }
759 794
760 795 /* smb_thread_... moved to smb_thread.c */
761 796
762 797 /*
763 798 * smb_rwx_init
764 799 */
765 800 void
766 801 smb_rwx_init(
767 802 smb_rwx_t *rwx)
768 803 {
769 804 bzero(rwx, sizeof (smb_rwx_t));
770 805 cv_init(&rwx->rwx_cv, NULL, CV_DEFAULT, NULL);
771 806 mutex_init(&rwx->rwx_mutex, NULL, MUTEX_DEFAULT, NULL);
772 807 rw_init(&rwx->rwx_lock, NULL, RW_DEFAULT, NULL);
773 808 }
774 809
775 810 /*
776 811 * smb_rwx_destroy
777 812 */
778 813 void
779 814 smb_rwx_destroy(
780 815 smb_rwx_t *rwx)
|
↓ open down ↓ |
313 lines elided |
↑ open up ↑ |
781 816 {
782 817 mutex_destroy(&rwx->rwx_mutex);
783 818 cv_destroy(&rwx->rwx_cv);
784 819 rw_destroy(&rwx->rwx_lock);
785 820 }
786 821
787 822 /*
788 823 * smb_rwx_rwexit
789 824 */
790 825 void
791 -smb_rwx_rwexit(
792 - smb_rwx_t *rwx)
826 +smb_rwx_rwenter(smb_rwx_t *rwx, krw_t mode)
793 827 {
794 - if (rw_write_held(&rwx->rwx_lock)) {
795 - ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
796 - mutex_enter(&rwx->rwx_mutex);
797 - if (rwx->rwx_waiting) {
798 - rwx->rwx_waiting = B_FALSE;
799 - cv_broadcast(&rwx->rwx_cv);
800 - }
801 - mutex_exit(&rwx->rwx_mutex);
802 - }
803 - rw_exit(&rwx->rwx_lock);
828 + rw_enter(&rwx->rwx_lock, mode);
804 829 }
805 830
806 831 /*
807 - * smb_rwx_rwupgrade
832 + * smb_rwx_rwexit
808 833 */
809 -krw_t
810 -smb_rwx_rwupgrade(
834 +void
835 +smb_rwx_rwexit(
811 836 smb_rwx_t *rwx)
812 837 {
813 - if (rw_write_held(&rwx->rwx_lock)) {
814 - ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
815 - return (RW_WRITER);
816 - }
817 - if (!rw_tryupgrade(&rwx->rwx_lock)) {
818 - rw_exit(&rwx->rwx_lock);
819 - rw_enter(&rwx->rwx_lock, RW_WRITER);
820 - }
821 - return (RW_READER);
838 + rw_exit(&rwx->rwx_lock);
822 839 }
823 840
824 -/*
825 - * smb_rwx_rwrestore
826 - */
827 -void
828 -smb_rwx_rwdowngrade(
829 - smb_rwx_t *rwx,
830 - krw_t mode)
831 -{
832 - ASSERT(rw_write_held(&rwx->rwx_lock));
833 - ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
834 841
835 - if (mode == RW_WRITER) {
836 - return;
837 - }
838 - ASSERT(mode == RW_READER);
839 - mutex_enter(&rwx->rwx_mutex);
840 - if (rwx->rwx_waiting) {
841 - rwx->rwx_waiting = B_FALSE;
842 - cv_broadcast(&rwx->rwx_cv);
843 - }
844 - mutex_exit(&rwx->rwx_mutex);
845 - rw_downgrade(&rwx->rwx_lock);
846 -}
847 -
848 842 /*
849 - * smb_rwx_wait
843 + * smb_rwx_cvwait
850 844 *
851 - * This function assumes the smb_rwx lock was enter in RW_READER or RW_WRITER
845 + * Wait on rwx->rw_cv, dropping the rw lock and retake after wakeup.
846 + * Assumes the smb_rwx lock was entered in RW_READER or RW_WRITER
852 847 * mode. It will:
853 848 *
854 849 * 1) release the lock and save its current mode.
855 - * 2) wait until the condition variable is signaled. This can happen for
856 - * 2 reasons: When a writer releases the lock or when the time out (if
857 - * provided) expires.
850 + * 2) wait until the condition variable is signaled.
858 851 * 3) re-acquire the lock in the mode saved in (1).
852 + *
853 + * Lock order: rwlock, mutex
859 854 */
860 855 int
861 -smb_rwx_rwwait(
856 +smb_rwx_cvwait(
862 857 smb_rwx_t *rwx,
863 858 clock_t timeout)
864 859 {
865 860 krw_t mode;
866 861 int rc = 1;
867 862
868 - mutex_enter(&rwx->rwx_mutex);
869 - rwx->rwx_waiting = B_TRUE;
870 - mutex_exit(&rwx->rwx_mutex);
871 -
872 863 if (rw_write_held(&rwx->rwx_lock)) {
873 864 ASSERT(rw_owner(&rwx->rwx_lock) == curthread);
874 865 mode = RW_WRITER;
875 866 } else {
876 867 ASSERT(rw_read_held(&rwx->rwx_lock));
877 868 mode = RW_READER;
878 869 }
879 - rw_exit(&rwx->rwx_lock);
880 870
881 871 mutex_enter(&rwx->rwx_mutex);
882 - if (rwx->rwx_waiting) {
883 - if (timeout == -1) {
884 - cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
885 - } else {
886 - rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
887 - timeout, TR_CLOCK_TICK);
888 - }
872 + rw_exit(&rwx->rwx_lock);
873 +
874 + rwx->rwx_waiting = B_TRUE;
875 + if (timeout == -1) {
876 + cv_wait(&rwx->rwx_cv, &rwx->rwx_mutex);
877 + } else {
878 + rc = cv_reltimedwait(&rwx->rwx_cv, &rwx->rwx_mutex,
879 + timeout, TR_CLOCK_TICK);
889 880 }
890 881 mutex_exit(&rwx->rwx_mutex);
891 882
892 883 rw_enter(&rwx->rwx_lock, mode);
893 884 return (rc);
894 885 }
895 886
887 +/*
888 + * smb_rwx_cvbcast
889 + *
890 + * Wake up threads waiting on rx_cv
891 + * The rw lock may or may not be held.
892 + * The mutex MUST NOT be held.
893 + */
894 +void
895 +smb_rwx_cvbcast(smb_rwx_t *rwx)
896 +{
897 + mutex_enter(&rwx->rwx_mutex);
898 + if (rwx->rwx_waiting) {
899 + rwx->rwx_waiting = B_FALSE;
900 + cv_broadcast(&rwx->rwx_cv);
901 + }
902 + mutex_exit(&rwx->rwx_mutex);
903 +}
904 +
896 905 /* smb_idmap_... moved to smb_idmap.c */
897 906
898 907 uint64_t
899 908 smb_time_unix_to_nt(timestruc_t *unix_time)
900 909 {
901 910 uint64_t nt_time;
902 911
903 912 if ((unix_time->tv_sec == 0) && (unix_time->tv_nsec == 0))
904 913 return (0);
905 914
906 915 nt_time = unix_time->tv_sec;
907 916 nt_time *= 10000000; /* seconds to 100ns */
908 917 nt_time += unix_time->tv_nsec / 100;
909 918 return (nt_time + NT_TIME_BIAS);
910 919 }
911 920
912 921 void
913 922 smb_time_nt_to_unix(uint64_t nt_time, timestruc_t *unix_time)
914 923 {
915 924 uint32_t seconds;
916 925
917 926 ASSERT(unix_time);
918 927
919 928 if ((nt_time == 0) || (nt_time == -1)) {
920 929 unix_time->tv_sec = 0;
921 930 unix_time->tv_nsec = 0;
922 931 return;
923 932 }
924 933
925 934 /*
926 935 * Can't represent times less than or equal NT_TIME_BIAS,
927 936 * so convert them to the oldest date we can store.
928 937 * Note that time zero is "special" being converted
929 938 * both directions as 0:0 (unix-to-nt, nt-to-unix).
930 939 */
931 940 if (nt_time <= NT_TIME_BIAS) {
932 941 unix_time->tv_sec = 0;
933 942 unix_time->tv_nsec = 100;
934 943 return;
935 944 }
936 945
937 946 nt_time -= NT_TIME_BIAS;
938 947 seconds = nt_time / 10000000;
939 948 unix_time->tv_sec = seconds;
940 949 unix_time->tv_nsec = (nt_time % 10000000) * 100;
941 950 }
942 951
943 952 /*
944 953 * smb_time_gmt_to_local, smb_time_local_to_gmt
945 954 *
946 955 * Apply the gmt offset to convert between local time and gmt
947 956 */
948 957 int32_t
949 958 smb_time_gmt_to_local(smb_request_t *sr, int32_t gmt)
950 959 {
951 960 if ((gmt == 0) || (gmt == -1))
952 961 return (0);
953 962
954 963 return (gmt - sr->sr_gmtoff);
955 964 }
956 965
957 966 int32_t
958 967 smb_time_local_to_gmt(smb_request_t *sr, int32_t local)
959 968 {
960 969 if ((local == 0) || (local == -1))
961 970 return (0);
962 971
963 972 return (local + sr->sr_gmtoff);
964 973 }
965 974
966 975
967 976 /*
968 977 * smb_time_dos_to_unix
969 978 *
970 979 * Convert SMB_DATE & SMB_TIME values to a unix timestamp.
971 980 *
972 981 * A date/time field of 0 means that that server file system
973 982 * assigned value need not be changed. The behaviour when the
974 983 * date/time field is set to -1 is not documented but is
975 984 * generally treated like 0.
976 985 * If date or time is 0 or -1 the unix time is returned as 0
977 986 * so that the caller can identify and handle this special case.
978 987 */
979 988 int32_t
980 989 smb_time_dos_to_unix(int16_t date, int16_t time)
981 990 {
982 991 struct tm atm;
983 992
984 993 if (((date == 0) || (time == 0)) ||
985 994 ((date == -1) || (time == -1))) {
986 995 return (0);
987 996 }
988 997
989 998 atm.tm_year = ((date >> 9) & 0x3F) + 80;
990 999 atm.tm_mon = ((date >> 5) & 0x0F) - 1;
991 1000 atm.tm_mday = ((date >> 0) & 0x1F);
992 1001 atm.tm_hour = ((time >> 11) & 0x1F);
993 1002 atm.tm_min = ((time >> 5) & 0x3F);
994 1003 atm.tm_sec = ((time >> 0) & 0x1F) << 1;
995 1004
996 1005 return (smb_timegm(&atm));
997 1006 }
998 1007
999 1008 void
1000 1009 smb_time_unix_to_dos(int32_t ux_time, int16_t *date_p, int16_t *time_p)
1001 1010 {
1002 1011 struct tm atm;
1003 1012 int i;
1004 1013 time_t tmp_time;
1005 1014
1006 1015 if (ux_time == 0) {
1007 1016 *date_p = 0;
1008 1017 *time_p = 0;
1009 1018 return;
1010 1019 }
1011 1020
1012 1021 tmp_time = (time_t)ux_time;
1013 1022 (void) smb_gmtime_r(&tmp_time, &atm);
1014 1023
1015 1024 if (date_p) {
1016 1025 i = 0;
1017 1026 i += atm.tm_year - 80;
1018 1027 i <<= 4;
1019 1028 i += atm.tm_mon + 1;
1020 1029 i <<= 5;
1021 1030 i += atm.tm_mday;
1022 1031
1023 1032 *date_p = (short)i;
1024 1033 }
1025 1034 if (time_p) {
1026 1035 i = 0;
1027 1036 i += atm.tm_hour;
1028 1037 i <<= 6;
1029 1038 i += atm.tm_min;
1030 1039 i <<= 5;
1031 1040 i += atm.tm_sec >> 1;
1032 1041
1033 1042 *time_p = (short)i;
1034 1043 }
1035 1044 }
1036 1045
1037 1046
1038 1047 /*
1039 1048 * smb_gmtime_r
1040 1049 *
1041 1050 * Thread-safe version of smb_gmtime. Returns a null pointer if either
1042 1051 * input parameter is a null pointer. Otherwise returns a pointer
1043 1052 * to result.
1044 1053 *
1045 1054 * Day of the week calculation: the Epoch was a thursday.
1046 1055 *
1047 1056 * There are no timezone corrections so tm_isdst and tm_gmtoff are
1048 1057 * always zero, and the zone is always WET.
1049 1058 */
1050 1059 struct tm *
1051 1060 smb_gmtime_r(time_t *clock, struct tm *result)
1052 1061 {
1053 1062 time_t tsec;
1054 1063 int year;
1055 1064 int month;
1056 1065 int sec_per_month;
1057 1066
1058 1067 if (clock == 0 || result == 0)
1059 1068 return (0);
1060 1069
1061 1070 bzero(result, sizeof (struct tm));
1062 1071 tsec = *clock;
1063 1072 tsec -= tzh_leapcnt;
1064 1073
1065 1074 result->tm_wday = tsec / SECSPERDAY;
1066 1075 result->tm_wday = (result->tm_wday + TM_THURSDAY) % DAYSPERWEEK;
1067 1076
1068 1077 year = EPOCH_YEAR;
1069 1078 while (tsec >= (isleap(year) ? (SECSPERDAY * DAYSPERLYEAR) :
1070 1079 (SECSPERDAY * DAYSPERNYEAR))) {
1071 1080 if (isleap(year))
1072 1081 tsec -= SECSPERDAY * DAYSPERLYEAR;
1073 1082 else
1074 1083 tsec -= SECSPERDAY * DAYSPERNYEAR;
1075 1084
1076 1085 ++year;
1077 1086 }
1078 1087
1079 1088 result->tm_year = year - TM_YEAR_BASE;
1080 1089 result->tm_yday = tsec / SECSPERDAY;
1081 1090
1082 1091 for (month = TM_JANUARY; month <= TM_DECEMBER; ++month) {
1083 1092 sec_per_month = days_in_month[month] * SECSPERDAY;
1084 1093
1085 1094 if (month == TM_FEBRUARY && isleap(year))
1086 1095 sec_per_month += SECSPERDAY;
1087 1096
1088 1097 if (tsec < sec_per_month)
1089 1098 break;
1090 1099
1091 1100 tsec -= sec_per_month;
1092 1101 }
1093 1102
1094 1103 result->tm_mon = month;
1095 1104 result->tm_mday = (tsec / SECSPERDAY) + 1;
1096 1105 tsec %= SECSPERDAY;
1097 1106 result->tm_sec = tsec % 60;
1098 1107 tsec /= 60;
1099 1108 result->tm_min = tsec % 60;
1100 1109 tsec /= 60;
1101 1110 result->tm_hour = (int)tsec;
1102 1111
1103 1112 return (result);
1104 1113 }
1105 1114
1106 1115
1107 1116 /*
1108 1117 * smb_timegm
1109 1118 *
1110 1119 * Converts the broken-down time in tm to a time value, i.e. the number
1111 1120 * of seconds since the Epoch (00:00:00 UTC, January 1, 1970). This is
1112 1121 * not a POSIX or ANSI function. Per the man page, the input values of
1113 1122 * tm_wday and tm_yday are ignored and, as the input data is assumed to
1114 1123 * represent GMT, we force tm_isdst and tm_gmtoff to 0.
1115 1124 *
1116 1125 * Before returning the clock time, we use smb_gmtime_r to set up tm_wday
1117 1126 * and tm_yday, and bring the other fields within normal range. I don't
1118 1127 * think this is really how it should be done but it's convenient for
1119 1128 * now.
1120 1129 */
1121 1130 time_t
1122 1131 smb_timegm(struct tm *tm)
1123 1132 {
1124 1133 time_t tsec;
1125 1134 int dd;
1126 1135 int mm;
1127 1136 int yy;
1128 1137 int year;
1129 1138
1130 1139 if (tm == 0)
1131 1140 return (-1);
1132 1141
1133 1142 year = tm->tm_year + TM_YEAR_BASE;
1134 1143 tsec = tzh_leapcnt;
1135 1144
1136 1145 for (yy = EPOCH_YEAR; yy < year; ++yy) {
1137 1146 if (isleap(yy))
1138 1147 tsec += SECSPERDAY * DAYSPERLYEAR;
1139 1148 else
1140 1149 tsec += SECSPERDAY * DAYSPERNYEAR;
1141 1150 }
1142 1151
1143 1152 for (mm = TM_JANUARY; mm < tm->tm_mon; ++mm) {
1144 1153 dd = days_in_month[mm] * SECSPERDAY;
1145 1154
1146 1155 if (mm == TM_FEBRUARY && isleap(year))
1147 1156 dd += SECSPERDAY;
1148 1157
1149 1158 tsec += dd;
1150 1159 }
1151 1160
1152 1161 tsec += (tm->tm_mday - 1) * SECSPERDAY;
1153 1162 tsec += tm->tm_sec;
1154 1163 tsec += tm->tm_min * SECSPERMIN;
1155 1164 tsec += tm->tm_hour * SECSPERHOUR;
1156 1165
1157 1166 tm->tm_isdst = 0;
1158 1167 (void) smb_gmtime_r(&tsec, tm);
1159 1168 return (tsec);
1160 1169 }
1161 1170
/*
 * smb_pad_align
 *
 * Return the number of pad bytes needed to advance offset to the
 * next multiple of align (0 if already aligned).
 */
uint32_t
smb_pad_align(uint32_t offset, uint32_t align)
{
	uint32_t rem = offset % align;

	return (rem == 0 ? 0 : align - rem);
}
1178 1187
1179 1188 /*
1180 1189 * smb_panic
1181 1190 *
1182 1191 * Logs the file name, function name and line number passed in and panics the
1183 1192 * system.
1184 1193 */
1185 1194 void
1186 1195 smb_panic(char *file, const char *func, int line)
1187 1196 {
1188 1197 cmn_err(CE_PANIC, "%s:%s:%d\n", file, func, line);
1189 1198 }
1190 1199
1191 1200 /*
1192 1201 * Creates an AVL tree and initializes the given smb_avl_t
1193 1202 * structure using the passed args
1194 1203 */
1195 1204 void
1196 1205 smb_avl_create(smb_avl_t *avl, size_t size, size_t offset,
1197 1206 const smb_avl_nops_t *ops)
1198 1207 {
1199 1208 ASSERT(avl);
1200 1209 ASSERT(ops);
1201 1210
1202 1211 rw_init(&avl->avl_lock, NULL, RW_DEFAULT, NULL);
1203 1212 mutex_init(&avl->avl_mutex, NULL, MUTEX_DEFAULT, NULL);
1204 1213
1205 1214 avl->avl_nops = ops;
1206 1215 avl->avl_state = SMB_AVL_STATE_READY;
1207 1216 avl->avl_refcnt = 0;
1208 1217 (void) random_get_pseudo_bytes((uint8_t *)&avl->avl_sequence,
1209 1218 sizeof (uint32_t));
1210 1219
1211 1220 avl_create(&avl->avl_tree, ops->avln_cmp, size, offset);
1212 1221 }
1213 1222
1214 1223 /*
1215 1224 * Destroys the specified AVL tree.
1216 1225 * It waits for all the in-flight operations to finish
1217 1226 * before destroying the AVL.
1218 1227 */
1219 1228 void
1220 1229 smb_avl_destroy(smb_avl_t *avl)
1221 1230 {
1222 1231 void *cookie = NULL;
1223 1232 void *node;
1224 1233
1225 1234 ASSERT(avl);
1226 1235
1227 1236 mutex_enter(&avl->avl_mutex);
1228 1237 if (avl->avl_state != SMB_AVL_STATE_READY) {
1229 1238 mutex_exit(&avl->avl_mutex);
1230 1239 return;
1231 1240 }
1232 1241
1233 1242 avl->avl_state = SMB_AVL_STATE_DESTROYING;
1234 1243
1235 1244 while (avl->avl_refcnt > 0)
1236 1245 (void) cv_wait(&avl->avl_cv, &avl->avl_mutex);
1237 1246 mutex_exit(&avl->avl_mutex);
1238 1247
1239 1248 rw_enter(&avl->avl_lock, RW_WRITER);
1240 1249 while ((node = avl_destroy_nodes(&avl->avl_tree, &cookie)) != NULL)
1241 1250 avl->avl_nops->avln_destroy(node);
1242 1251
1243 1252 avl_destroy(&avl->avl_tree);
1244 1253 rw_exit(&avl->avl_lock);
1245 1254
1246 1255 rw_destroy(&avl->avl_lock);
1247 1256
|
↓ open down ↓ |
342 lines elided |
↑ open up ↑ |
1248 1257 mutex_destroy(&avl->avl_mutex);
1249 1258 bzero(avl, sizeof (smb_avl_t));
1250 1259 }
1251 1260
1252 1261 /*
1253 1262 * Adds the given item to the AVL if it's
1254 1263 * not already there.
1255 1264 *
1256 1265 * Returns:
1257 1266 *
1258 - * ENOTACTIVE AVL is not in READY state
1259 - * EEXIST The item is already in AVL
1267 + * ENOTACTIVE AVL is not in READY state
1268 + * EEXIST The item is already in AVL
1260 1269 */
1261 1270 int
1262 1271 smb_avl_add(smb_avl_t *avl, void *item)
1263 1272 {
1264 1273 avl_index_t where;
1265 1274
1266 1275 ASSERT(avl);
1267 1276 ASSERT(item);
1268 1277
1269 1278 if (!smb_avl_hold(avl))
1270 1279 return (ENOTACTIVE);
1271 1280
1272 1281 rw_enter(&avl->avl_lock, RW_WRITER);
1273 1282 if (avl_find(&avl->avl_tree, item, &where) != NULL) {
1274 1283 rw_exit(&avl->avl_lock);
1275 1284 smb_avl_rele(avl);
1276 1285 return (EEXIST);
1277 1286 }
1278 1287
1279 1288 avl_insert(&avl->avl_tree, item, where);
1280 1289 avl->avl_sequence++;
1281 1290 rw_exit(&avl->avl_lock);
1282 1291
1283 1292 smb_avl_rele(avl);
1284 1293 return (0);
1285 1294 }
1286 1295
1287 1296 /*
1288 1297 * Removes the given item from the AVL.
1289 1298 * If no reference is left on the item
1290 1299 * it will also be destroyed by calling the
1291 1300 * registered destroy operation.
1292 1301 */
1293 1302 void
1294 1303 smb_avl_remove(smb_avl_t *avl, void *item)
1295 1304 {
1296 1305 avl_index_t where;
1297 1306 void *rm_item;
1298 1307
1299 1308 ASSERT(avl);
1300 1309 ASSERT(item);
1301 1310
1302 1311 if (!smb_avl_hold(avl))
1303 1312 return;
1304 1313
1305 1314 rw_enter(&avl->avl_lock, RW_WRITER);
1306 1315 if ((rm_item = avl_find(&avl->avl_tree, item, &where)) == NULL) {
1307 1316 rw_exit(&avl->avl_lock);
1308 1317 smb_avl_rele(avl);
1309 1318 return;
1310 1319 }
1311 1320
1312 1321 avl_remove(&avl->avl_tree, rm_item);
1313 1322 if (avl->avl_nops->avln_rele(rm_item))
1314 1323 avl->avl_nops->avln_destroy(rm_item);
1315 1324 avl->avl_sequence++;
1316 1325 rw_exit(&avl->avl_lock);
1317 1326
1318 1327 smb_avl_rele(avl);
1319 1328 }
1320 1329
1321 1330 /*
1322 1331 * Looks up the AVL for the given item.
1323 1332 * If the item is found a hold on the object
1324 1333 * is taken before the pointer to it is
1325 1334 * returned to the caller. The caller MUST
1326 1335 * always call smb_avl_release() after it's done
1327 1336 * using the returned object to release the hold
1328 1337 * taken on the object.
1329 1338 */
1330 1339 void *
1331 1340 smb_avl_lookup(smb_avl_t *avl, void *item)
1332 1341 {
1333 1342 void *node = NULL;
1334 1343
1335 1344 ASSERT(avl);
1336 1345 ASSERT(item);
1337 1346
1338 1347 if (!smb_avl_hold(avl))
1339 1348 return (NULL);
1340 1349
1341 1350 rw_enter(&avl->avl_lock, RW_READER);
1342 1351 node = avl_find(&avl->avl_tree, item, NULL);
1343 1352 if (node != NULL)
1344 1353 avl->avl_nops->avln_hold(node);
1345 1354 rw_exit(&avl->avl_lock);
1346 1355
1347 1356 if (node == NULL)
1348 1357 smb_avl_rele(avl);
1349 1358
1350 1359 return (node);
1351 1360 }
1352 1361
1353 1362 /*
1354 1363 * The hold on the given object is released.
1355 1364 * This function MUST always be called after
1356 1365 * smb_avl_lookup() and smb_avl_iterate() for
1357 1366 * the returned object.
1358 1367 *
1359 1368 * If AVL is in DESTROYING state, the destroying
1360 1369 * thread will be notified.
1361 1370 */
1362 1371 void
1363 1372 smb_avl_release(smb_avl_t *avl, void *item)
1364 1373 {
1365 1374 ASSERT(avl);
1366 1375 ASSERT(item);
1367 1376
1368 1377 if (avl->avl_nops->avln_rele(item))
1369 1378 avl->avl_nops->avln_destroy(item);
1370 1379
1371 1380 smb_avl_rele(avl);
1372 1381 }
1373 1382
1374 1383 /*
1375 1384 * Initializes the given cursor for the AVL.
1376 1385 * The cursor will be used to iterate through the AVL
1377 1386 */
1378 1387 void
1379 1388 smb_avl_iterinit(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1380 1389 {
1381 1390 ASSERT(avl);
1382 1391 ASSERT(cursor);
1383 1392
1384 1393 cursor->avlc_next = NULL;
1385 1394 cursor->avlc_sequence = avl->avl_sequence;
1386 1395 }
1387 1396
1388 1397 /*
1389 1398 * Iterates through the AVL using the given cursor.
1390 1399 * It always starts at the beginning and then returns
1391 1400 * a pointer to the next object on each subsequent call.
1392 1401 *
1393 1402 * If a new object is added to or removed from the AVL
1394 1403 * between two calls to this function, the iteration
1395 1404 * will terminate prematurely.
1396 1405 *
1397 1406 * The caller MUST always call smb_avl_release() after it's
1398 1407 * done using the returned object to release the hold taken
1399 1408 * on the object.
1400 1409 */
1401 1410 void *
1402 1411 smb_avl_iterate(smb_avl_t *avl, smb_avl_cursor_t *cursor)
1403 1412 {
1404 1413 void *node;
1405 1414
1406 1415 ASSERT(avl);
1407 1416 ASSERT(cursor);
1408 1417
1409 1418 if (!smb_avl_hold(avl))
1410 1419 return (NULL);
1411 1420
1412 1421 rw_enter(&avl->avl_lock, RW_READER);
1413 1422 if (cursor->avlc_sequence != avl->avl_sequence) {
1414 1423 rw_exit(&avl->avl_lock);
1415 1424 smb_avl_rele(avl);
1416 1425 return (NULL);
1417 1426 }
1418 1427
1419 1428 if (cursor->avlc_next == NULL)
1420 1429 node = avl_first(&avl->avl_tree);
1421 1430 else
1422 1431 node = AVL_NEXT(&avl->avl_tree, cursor->avlc_next);
1423 1432
1424 1433 if (node != NULL)
1425 1434 avl->avl_nops->avln_hold(node);
1426 1435
1427 1436 cursor->avlc_next = node;
1428 1437 rw_exit(&avl->avl_lock);
1429 1438
1430 1439 if (node == NULL)
1431 1440 smb_avl_rele(avl);
1432 1441
1433 1442 return (node);
1434 1443 }
1435 1444
1436 1445 /*
1437 1446 * Increments the AVL reference count in order to
1438 1447 * prevent the avl from being destroyed while it's
1439 1448 * being accessed.
1440 1449 */
1441 1450 static boolean_t
1442 1451 smb_avl_hold(smb_avl_t *avl)
1443 1452 {
1444 1453 mutex_enter(&avl->avl_mutex);
1445 1454 if (avl->avl_state != SMB_AVL_STATE_READY) {
1446 1455 mutex_exit(&avl->avl_mutex);
1447 1456 return (B_FALSE);
1448 1457 }
1449 1458 avl->avl_refcnt++;
1450 1459 mutex_exit(&avl->avl_mutex);
1451 1460
1452 1461 return (B_TRUE);
1453 1462 }
1454 1463
1455 1464 /*
1456 1465 * Decrements the AVL reference count to release the
1457 1466 * hold. If another thread is trying to destroy the
1458 1467 * AVL and is waiting for the reference count to become
1459 1468 * 0, it is signaled to wake up.
1460 1469 */
1461 1470 static void
1462 1471 smb_avl_rele(smb_avl_t *avl)
1463 1472 {
1464 1473 mutex_enter(&avl->avl_mutex);
1465 1474 ASSERT(avl->avl_refcnt > 0);
1466 1475 avl->avl_refcnt--;
1467 1476 if (avl->avl_state == SMB_AVL_STATE_DESTROYING)
1468 1477 cv_broadcast(&avl->avl_cv);
1469 1478 mutex_exit(&avl->avl_mutex);
1470 1479 }
1471 1480
1472 1481 /*
1473 1482 * smb_latency_init
1474 1483 */
1475 1484 void
1476 1485 smb_latency_init(smb_latency_t *lat)
1477 1486 {
1478 1487 bzero(lat, sizeof (*lat));
1479 1488 mutex_init(&lat->ly_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1480 1489 }
1481 1490
1482 1491 /*
1483 1492 * smb_latency_destroy
1484 1493 */
1485 1494 void
1486 1495 smb_latency_destroy(smb_latency_t *lat)
1487 1496 {
1488 1497 mutex_destroy(&lat->ly_mutex);
1489 1498 }
1490 1499
1491 1500 /*
1492 1501 * smb_latency_add_sample
1493 1502 *
1494 1503 * Uses the new sample to calculate the new mean and standard deviation. The
1495 1504 * sample must be a scaled value.
1496 1505 */
1497 1506 void
1498 1507 smb_latency_add_sample(smb_latency_t *lat, hrtime_t sample)
1499 1508 {
1500 1509 hrtime_t a_mean;
1501 1510 hrtime_t d_mean;
1502 1511
1503 1512 mutex_enter(&lat->ly_mutex);
1504 1513 lat->ly_a_nreq++;
1505 1514 lat->ly_a_sum += sample;
1506 1515 if (lat->ly_a_nreq != 0) {
1507 1516 a_mean = lat->ly_a_sum / lat->ly_a_nreq;
1508 1517 lat->ly_a_stddev =
1509 1518 (sample - a_mean) * (sample - lat->ly_a_mean);
1510 1519 lat->ly_a_mean = a_mean;
1511 1520 }
1512 1521 lat->ly_d_nreq++;
1513 1522 lat->ly_d_sum += sample;
1514 1523 if (lat->ly_d_nreq != 0) {
1515 1524 d_mean = lat->ly_d_sum / lat->ly_d_nreq;
1516 1525 lat->ly_d_stddev =
1517 1526 (sample - d_mean) * (sample - lat->ly_d_mean);
1518 1527 lat->ly_d_mean = d_mean;
1519 1528 }
1520 1529 mutex_exit(&lat->ly_mutex);
1521 1530 }
1522 1531
1523 1532 /*
1524 1533 * smb_srqueue_init
1525 1534 */
1526 1535 void
1527 1536 smb_srqueue_init(smb_srqueue_t *srq)
1528 1537 {
1529 1538 bzero(srq, sizeof (*srq));
1530 1539 mutex_init(&srq->srq_mutex, NULL, MUTEX_SPIN, (void *)ipltospl(SPL7));
1531 1540 srq->srq_wlastupdate = srq->srq_rlastupdate = gethrtime_unscaled();
1532 1541 }
1533 1542
1534 1543 /*
1535 1544 * smb_srqueue_destroy
1536 1545 */
1537 1546 void
1538 1547 smb_srqueue_destroy(smb_srqueue_t *srq)
1539 1548 {
1540 1549 mutex_destroy(&srq->srq_mutex);
1541 1550 }
1542 1551
1543 1552 /*
1544 1553 * smb_srqueue_waitq_enter
1545 1554 */
1546 1555 void
1547 1556 smb_srqueue_waitq_enter(smb_srqueue_t *srq)
1548 1557 {
1549 1558 hrtime_t new;
1550 1559 hrtime_t delta;
1551 1560 uint32_t wcnt;
1552 1561
1553 1562 mutex_enter(&srq->srq_mutex);
1554 1563 new = gethrtime_unscaled();
1555 1564 delta = new - srq->srq_wlastupdate;
1556 1565 srq->srq_wlastupdate = new;
1557 1566 wcnt = srq->srq_wcnt++;
1558 1567 if (wcnt != 0) {
1559 1568 srq->srq_wlentime += delta * wcnt;
1560 1569 srq->srq_wtime += delta;
1561 1570 }
1562 1571 mutex_exit(&srq->srq_mutex);
1563 1572 }
1564 1573
1565 1574 /*
1566 1575 * smb_srqueue_runq_exit
1567 1576 */
1568 1577 void
1569 1578 smb_srqueue_runq_exit(smb_srqueue_t *srq)
1570 1579 {
1571 1580 hrtime_t new;
1572 1581 hrtime_t delta;
1573 1582 uint32_t rcnt;
1574 1583
1575 1584 mutex_enter(&srq->srq_mutex);
1576 1585 new = gethrtime_unscaled();
1577 1586 delta = new - srq->srq_rlastupdate;
1578 1587 srq->srq_rlastupdate = new;
1579 1588 rcnt = srq->srq_rcnt--;
1580 1589 ASSERT(rcnt > 0);
1581 1590 srq->srq_rlentime += delta * rcnt;
1582 1591 srq->srq_rtime += delta;
1583 1592 mutex_exit(&srq->srq_mutex);
1584 1593 }
1585 1594
1586 1595 /*
1587 1596 * smb_srqueue_waitq_to_runq
1588 1597 */
1589 1598 void
1590 1599 smb_srqueue_waitq_to_runq(smb_srqueue_t *srq)
1591 1600 {
1592 1601 hrtime_t new;
1593 1602 hrtime_t delta;
1594 1603 uint32_t wcnt;
1595 1604 uint32_t rcnt;
1596 1605
1597 1606 mutex_enter(&srq->srq_mutex);
1598 1607 new = gethrtime_unscaled();
1599 1608 delta = new - srq->srq_wlastupdate;
1600 1609 srq->srq_wlastupdate = new;
1601 1610 wcnt = srq->srq_wcnt--;
1602 1611 ASSERT(wcnt > 0);
1603 1612 srq->srq_wlentime += delta * wcnt;
1604 1613 srq->srq_wtime += delta;
1605 1614 delta = new - srq->srq_rlastupdate;
1606 1615 srq->srq_rlastupdate = new;
1607 1616 rcnt = srq->srq_rcnt++;
1608 1617 if (rcnt != 0) {
1609 1618 srq->srq_rlentime += delta * rcnt;
1610 1619 srq->srq_rtime += delta;
1611 1620 }
1612 1621 mutex_exit(&srq->srq_mutex);
1613 1622 }
1614 1623
1615 1624 /*
1616 1625 * smb_srqueue_update
1617 1626 *
1618 1627 * Takes a snapshot of the smb_sr_stat_t structure passed in.
1619 1628 */
1620 1629 void
1621 1630 smb_srqueue_update(smb_srqueue_t *srq, smb_kstat_utilization_t *kd)
1622 1631 {
1623 1632 hrtime_t delta;
1624 1633 hrtime_t snaptime;
1625 1634
1626 1635 mutex_enter(&srq->srq_mutex);
1627 1636 snaptime = gethrtime_unscaled();
1628 1637 delta = snaptime - srq->srq_wlastupdate;
1629 1638 srq->srq_wlastupdate = snaptime;
1630 1639 if (srq->srq_wcnt != 0) {
1631 1640 srq->srq_wlentime += delta * srq->srq_wcnt;
1632 1641 srq->srq_wtime += delta;
1633 1642 }
1634 1643 delta = snaptime - srq->srq_rlastupdate;
1635 1644 srq->srq_rlastupdate = snaptime;
1636 1645 if (srq->srq_rcnt != 0) {
1637 1646 srq->srq_rlentime += delta * srq->srq_rcnt;
1638 1647 srq->srq_rtime += delta;
1639 1648 }
1640 1649 kd->ku_rlentime = srq->srq_rlentime;
1641 1650 kd->ku_rtime = srq->srq_rtime;
1642 1651 kd->ku_wlentime = srq->srq_wlentime;
1643 1652 kd->ku_wtime = srq->srq_wtime;
1644 1653 mutex_exit(&srq->srq_mutex);
1645 1654 scalehrtime(&kd->ku_rlentime);
1646 1655 scalehrtime(&kd->ku_rtime);
1647 1656 scalehrtime(&kd->ku_wlentime);
1648 1657 scalehrtime(&kd->ku_wtime);
1649 1658 }
1650 1659
1651 1660 void
1652 1661 smb_threshold_init(smb_cmd_threshold_t *ct, char *cmd,
1653 1662 uint_t threshold, uint_t timeout)
1654 1663 {
1655 1664 bzero(ct, sizeof (smb_cmd_threshold_t));
1656 1665 mutex_init(&ct->ct_mutex, NULL, MUTEX_DEFAULT, NULL);
1657 1666 cv_init(&ct->ct_cond, NULL, CV_DEFAULT, NULL);
1658 1667
1659 1668 ct->ct_cmd = cmd;
1660 1669 ct->ct_threshold = threshold;
1661 1670 ct->ct_timeout = timeout;
1662 1671 }
1663 1672
1664 1673 void
1665 1674 smb_threshold_fini(smb_cmd_threshold_t *ct)
1666 1675 {
1667 1676 cv_destroy(&ct->ct_cond);
1668 1677 mutex_destroy(&ct->ct_mutex);
1669 1678 }
1670 1679
1671 1680 /*
1672 1681 * This threshold mechanism is used to limit the number of simultaneous
1673 1682 * named pipe connections, concurrent authentication conversations, etc.
1674 1683 * Requests that would take us over the threshold wait until either the
1675 1684 * resources are available (return zero) or timeout (return error).
1676 1685 */
1677 1686 int
1678 1687 smb_threshold_enter(smb_cmd_threshold_t *ct)
1679 1688 {
1680 1689 clock_t time, rem;
1681 1690
1682 1691 time = MSEC_TO_TICK(ct->ct_timeout) + ddi_get_lbolt();
1683 1692 mutex_enter(&ct->ct_mutex);
1684 1693
1685 1694 while (ct->ct_threshold != 0 &&
1686 1695 ct->ct_threshold <= ct->ct_active_cnt) {
1687 1696 ct->ct_blocked_cnt++;
1688 1697 rem = cv_timedwait(&ct->ct_cond, &ct->ct_mutex, time);
1689 1698 ct->ct_blocked_cnt--;
1690 1699 if (rem < 0) {
1691 1700 mutex_exit(&ct->ct_mutex);
1692 1701 return (ETIME);
1693 1702 }
1694 1703 }
1695 1704 if (ct->ct_threshold == 0) {
1696 1705 mutex_exit(&ct->ct_mutex);
1697 1706 return (ECANCELED);
1698 1707 }
1699 1708
1700 1709 ASSERT3U(ct->ct_active_cnt, <, ct->ct_threshold);
1701 1710 ct->ct_active_cnt++;
1702 1711
1703 1712 mutex_exit(&ct->ct_mutex);
1704 1713 return (0);
1705 1714 }
1706 1715
1707 1716 void
1708 1717 smb_threshold_exit(smb_cmd_threshold_t *ct)
1709 1718 {
1710 1719 mutex_enter(&ct->ct_mutex);
1711 1720 ASSERT3U(ct->ct_active_cnt, >, 0);
1712 1721 ct->ct_active_cnt--;
1713 1722 if (ct->ct_blocked_cnt)
1714 1723 cv_signal(&ct->ct_cond);
|
↓ open down ↓ |
445 lines elided |
↑ open up ↑ |
1715 1724 mutex_exit(&ct->ct_mutex);
1716 1725 }
1717 1726
1718 1727 void
1719 1728 smb_threshold_wake_all(smb_cmd_threshold_t *ct)
1720 1729 {
1721 1730 mutex_enter(&ct->ct_mutex);
1722 1731 ct->ct_threshold = 0;
1723 1732 cv_broadcast(&ct->ct_cond);
1724 1733 mutex_exit(&ct->ct_mutex);
1734 +}
1735 +
1736 +/* taken from mod_hash_byptr */
1737 +uint_t
1738 +smb_hash_uint64(smb_hash_t *hash, uint64_t val)
1739 +{
1740 + uint64_t k = val >> hash->rshift;
1741 + uint_t idx = ((uint_t)k) & (hash->num_buckets - 1);
1742 +
1743 + return (idx);
1744 +}
1745 +
1746 +boolean_t
1747 +smb_is_pow2(size_t n)
1748 +{
1749 + return ((n & (n - 1)) == 0);
1750 +}
1751 +
1752 +smb_hash_t *
1753 +smb_hash_create(size_t elemsz, size_t link_offset,
1754 + uint32_t num_buckets)
1755 +{
1756 + smb_hash_t *hash = kmem_alloc(sizeof (*hash), KM_SLEEP);
1757 + int i;
1758 +
1759 + if (!smb_is_pow2(num_buckets))
1760 + num_buckets = 1 << highbit(num_buckets);
1761 +
1762 + hash->rshift = highbit(elemsz);
1763 + hash->num_buckets = num_buckets;
1764 + hash->buckets = kmem_zalloc(num_buckets * sizeof (smb_bucket_t),
1765 + KM_SLEEP);
1766 + for (i = 0; i < num_buckets; i++)
1767 + smb_llist_constructor(&hash->buckets[i].b_list, elemsz,
1768 + link_offset);
1769 + return (hash);
1770 +}
1771 +
1772 +void
1773 +smb_hash_destroy(smb_hash_t *hash)
1774 +{
1775 + int i;
1776 +
1777 + for (i = 0; i < hash->num_buckets; i++)
1778 + smb_llist_destructor(&hash->buckets[i].b_list);
1779 +
1780 + kmem_free(hash->buckets, hash->num_buckets * sizeof (smb_bucket_t));
1781 + kmem_free(hash, sizeof (*hash));
1725 1782 }
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX