Print this page
NEX-5733 cleanup qlt/qlc
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
Reviewed by: Alek Pinchuk <alek.pinchuk@nexenta.com>
NEX-5717 import QLogic 16G FC drivers
Reviewed by: Steve Peng <steve.peng@nexenta.com>
Reviewed by: Josef 'Jeff' Sipek <josef.sipek@nexenta.com>
Reviewed by: Yuri Pankov <yuri.pankov@nexenta.com>
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/comstar/port/qlt/qlt_dma.c
+++ new/usr/src/uts/common/io/comstar/port/qlt/qlt_dma.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 9 * or http://www.opensolaris.org/os/licensing.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 - * Copyright 2009 QLogic Corporation. All rights reserved.
23 + * Copyright 2009-2015 QLogic Corporation. All rights reserved.
24 24 * Use is subject to license terms.
25 25 */
26 26
27 27 /*
28 - * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
28 + * Copyright (c) 2008, 2015, Oracle and/or its affiliates. All rights reserved.
29 29 */
30 30
31 31 #include <sys/conf.h>
32 32 #include <sys/ddi.h>
33 33 #include <sys/sunddi.h>
34 34 #include <sys/modctl.h>
35 35
36 36 #include <sys/stmf_defines.h>
37 37 #include <sys/fct_defines.h>
38 38 #include <sys/stmf.h>
39 39 #include <sys/portif.h>
40 40 #include <sys/fct.h>
41 41
42 42 #include "qlt.h"
43 43 #include "qlt_dma.h"
44 44
/*
 * Local Function Prototypes.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle);

/*
 * Default buffer counts per dmem bucket; each may be overridden at
 * attach time through qlt->qlt_bucketcnt[] (see qlt_dmem_init()).
 */
#define	BUF_COUNT_2K		2048
#define	BUF_COUNT_8K		512
#define	BUF_COUNT_64K		256
#define	BUF_COUNT_128K		1024
#define	BUF_COUNT_256K		8

/* Largest bucket buffer size (256K); requests above this are split. */
#define	QLT_DMEM_MAX_BUF_SIZE	(4 * 65536)
#define	QLT_DMEM_NBUCKETS	5
static qlt_dmem_bucket_t bucket2K = { 2048, BUF_COUNT_2K },
			bucket8K = { 8192, BUF_COUNT_8K },
			bucket64K = { 65536, BUF_COUNT_64K },
			bucket128k = { (2 * 65536), BUF_COUNT_128K },
			bucket256k = { (4 * 65536), BUF_COUNT_256K };

/* NULL-terminated bucket table, ascending by buffer size. */
static qlt_dmem_bucket_t *dmem_buckets[] = { &bucket2K, &bucket8K,
			&bucket64K, &bucket128k, &bucket256k, NULL };
/* Device access attributes; initialized in qlt_dmem_init(). */
static ddi_device_acc_attr_t acc;
/* DMA attributes for the bucket data memory (single-cookie binding). */
static ddi_dma_attr_t qlt_scsi_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	8192,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	1,			/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};
82 82
/*
 * Allocate and DMA-bind the per-bucket data buffer pools.
 *
 * For each bucket: one large contiguous DMA allocation is carved into
 * dmem_nbufs fixed-size buffers, each tracked by a qlt_dmem_bctl_t that
 * is threaded onto the bucket's free list.  On any failure, previously
 * initialized buckets are torn down via the goto-chain below.
 *
 * Returns QLT_SUCCESS or QLT_FAILURE.
 */
fct_status_t
qlt_dmem_init(qlt_state_t *qlt)
{
	qlt_dmem_bucket_t *p;
	qlt_dmem_bctl_t *bctl, *bc;
	qlt_dmem_bctl_t *prev;
	int ndx, i;
	uint32_t total_mem;
	uint8_t *addr;
	uint8_t *host_addr;
	uint64_t dev_addr;
	ddi_dma_cookie_t cookie;
	uint32_t ncookie;
	uint32_t bsize;
	size_t len;

	/* Apply per-bucket buffer-count overrides from qlt state. */
	if (qlt->qlt_bucketcnt[0] != 0) {
		bucket2K.dmem_nbufs = qlt->qlt_bucketcnt[0];
	}
	if (qlt->qlt_bucketcnt[1] != 0) {
		bucket8K.dmem_nbufs = qlt->qlt_bucketcnt[1];
	}
	if (qlt->qlt_bucketcnt[2] != 0) {
		bucket64K.dmem_nbufs = qlt->qlt_bucketcnt[2];
	}
	if (qlt->qlt_bucketcnt[3] != 0) {
		bucket128k.dmem_nbufs = qlt->qlt_bucketcnt[3];
	}
	if (qlt->qlt_bucketcnt[4] != 0) {
		bucket256k.dmem_nbufs = qlt->qlt_bucketcnt[4];
	}

	bsize = sizeof (dmem_buckets);
	ndx = (int)(bsize / sizeof (void *));
	/*
	 * The reason it is ndx - 1 everywhere is because the last bucket
	 * pointer is NULL.
	 */
	/*
	 * One allocation holds the pointer table followed by per-instance
	 * copies of the bucket templates; the pointers are aimed at the
	 * copies so each qlt instance gets private bucket state.
	 */
	qlt->dmem_buckets = (qlt_dmem_bucket_t **)kmem_zalloc(bsize +
	    ((ndx - 1) * (int)sizeof (qlt_dmem_bucket_t)), KM_SLEEP);
	for (i = 0; i < (ndx - 1); i++) {
		qlt->dmem_buckets[i] = (qlt_dmem_bucket_t *)
		    ((uint8_t *)qlt->dmem_buckets + bsize +
		    (i * (int)sizeof (qlt_dmem_bucket_t)));
		bcopy(dmem_buckets[i], qlt->dmem_buckets[i],
		    sizeof (qlt_dmem_bucket_t));
	}
	bzero(&acc, sizeof (acc));
	acc.devacc_attr_version = DDI_DEVICE_ATTR_V0;
	acc.devacc_attr_endian_flags = DDI_NEVERSWAP_ACC;
	acc.devacc_attr_dataorder = DDI_STRICTORDER_ACC;
	for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
		bctl = (qlt_dmem_bctl_t *)kmem_zalloc(p->dmem_nbufs *
		    sizeof (qlt_dmem_bctl_t), KM_NOSLEEP);
		if (bctl == NULL) {
			EL(qlt, "bctl==NULL\n");
			goto alloc_bctl_failed;
		}
		p->dmem_bctls_mem = bctl;
		mutex_init(&p->dmem_lock, NULL, MUTEX_DRIVER, NULL);
		if ((i = ddi_dma_alloc_handle(qlt->dip, &qlt_scsi_dma_attr,
		    DDI_DMA_SLEEP, 0, &p->dmem_dma_handle)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_alloc_handle status=%xh\n", i);
			goto alloc_handle_failed;
		}

		total_mem = p->dmem_buf_size * p->dmem_nbufs;

		if ((i = ddi_dma_mem_alloc(p->dmem_dma_handle, total_mem, &acc,
		    DDI_DMA_STREAMING, DDI_DMA_DONTWAIT, 0, (caddr_t *)&addr,
		    &len, &p->dmem_acc_handle)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_mem_alloc status=%xh\n", i);
			goto mem_alloc_failed;
		}

		if ((i = ddi_dma_addr_bind_handle(p->dmem_dma_handle, NULL,
		    (caddr_t)addr, total_mem, DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT, 0, &cookie, &ncookie)) != DDI_SUCCESS) {
			EL(qlt, "ddi_dma_addr_bind_handle status=%xh\n", i);
			goto addr_bind_handle_failed;
		}
		/* qlt_scsi_dma_attr has sgllen 1, so expect one cookie. */
		if (ncookie != 1) {
			EL(qlt, "ncookie=%d\n", ncookie);
			goto dmem_init_failed;
		}

		p->dmem_host_addr = host_addr = addr;
		p->dmem_dev_addr = dev_addr = (uint64_t)cookie.dmac_laddress;
		bsize = p->dmem_buf_size;
		p->dmem_bctl_free_list = bctl;
		p->dmem_nbufs_free = p->dmem_nbufs;
		/* Carve the big allocation into bsize chunks on a list. */
		for (i = 0; i < p->dmem_nbufs; i++) {
			stmf_data_buf_t *db;
			prev = bctl;
			bctl->bctl_bucket = p;
			bctl->bctl_buf = db = stmf_alloc(STMF_STRUCT_DATA_BUF,
			    0, 0);
			db->db_port_private = bctl;
			db->db_sglist[0].seg_addr = host_addr;
			bctl->bctl_dev_addr = dev_addr;
			db->db_sglist[0].seg_length = db->db_buf_size = bsize;
			db->db_sglist_length = 1;
			host_addr += bsize;
			dev_addr += bsize;
			bctl++;
			prev->bctl_next = bctl;
		}
		prev->bctl_next = NULL;
	}

	return (QLT_SUCCESS);

	/*
	 * Unwind: each label below frees the resources acquired before the
	 * corresponding failure point, then falls through; fully initialized
	 * earlier buckets re-enter at dmem_failure_loop via the ndx walk.
	 */
dmem_failure_loop:;
	bc = bctl;
	while (bc) {
		stmf_free(bc->bctl_buf);
		bc = bc->bctl_next;
	}
dmem_init_failed:;
	(void) ddi_dma_unbind_handle(p->dmem_dma_handle);
addr_bind_handle_failed:;
	ddi_dma_mem_free(&p->dmem_acc_handle);
mem_alloc_failed:;
	ddi_dma_free_handle(&p->dmem_dma_handle);
alloc_handle_failed:;
	kmem_free(p->dmem_bctls_mem, p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
	mutex_destroy(&p->dmem_lock);
alloc_bctl_failed:;
	if (--ndx >= 0) {
		p = qlt->dmem_buckets[ndx];
		bctl = p->dmem_bctl_free_list;
		goto dmem_failure_loop;
	}
	kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
	    (((sizeof (dmem_buckets)/sizeof (void *))-1)*
	    sizeof (qlt_dmem_bucket_t)));
	qlt->dmem_buckets = NULL;

	return (QLT_FAILURE);
}
223 223
224 224 void
225 225 qlt_dma_handle_pool_init(qlt_state_t *qlt)
226 226 {
227 227 qlt_dma_handle_pool_t *pool;
228 228
229 229 pool = kmem_zalloc(sizeof (*pool), KM_SLEEP);
230 230 mutex_init(&pool->pool_lock, NULL, MUTEX_DRIVER, NULL);
231 231 qlt->qlt_dma_handle_pool = pool;
232 232 }
233 233
234 234 void
235 235 qlt_dma_handle_pool_fini(qlt_state_t *qlt)
236 236 {
237 237 qlt_dma_handle_pool_t *pool;
238 238 qlt_dma_handle_t *handle, *next_handle;
239 239
240 240 pool = qlt->qlt_dma_handle_pool;
241 241 mutex_enter(&pool->pool_lock);
242 242 /*
243 243 * XXX Need to wait for free == total elements
244 244 * XXX Not sure how other driver shutdown stuff is done.
245 245 */
246 246 ASSERT(pool->num_free == pool->num_total);
247 247 if (pool->num_free != pool->num_total)
248 248 cmn_err(CE_WARN,
249 249 "num_free %d != num_total %d\n",
250 250 pool->num_free, pool->num_total);
251 251 handle = pool->free_list;
252 252 while (handle) {
253 253 next_handle = handle->next;
254 254 kmem_free(handle, sizeof (*handle));
255 255 handle = next_handle;
256 256 }
257 257 qlt->qlt_dma_handle_pool = NULL;
258 258 mutex_destroy(&pool->pool_lock);
259 259 kmem_free(pool, sizeof (*pool));
260 260 }
261 261
262 262 void
263 263 qlt_dmem_fini(qlt_state_t *qlt)
264 264 {
265 265 qlt_dmem_bucket_t *p;
266 266 qlt_dmem_bctl_t *bctl;
267 267 int ndx;
268 268
269 269 for (ndx = 0; (p = qlt->dmem_buckets[ndx]) != NULL; ndx++) {
270 270 bctl = p->dmem_bctl_free_list;
271 271 while (bctl) {
272 272 stmf_free(bctl->bctl_buf);
273 273 bctl = bctl->bctl_next;
274 274 }
275 275 bctl = p->dmem_bctl_free_list;
276 276 (void) ddi_dma_unbind_handle(p->dmem_dma_handle);
277 277 ddi_dma_mem_free(&p->dmem_acc_handle);
278 278 ddi_dma_free_handle(&p->dmem_dma_handle);
279 279 kmem_free(p->dmem_bctls_mem,
280 280 p->dmem_nbufs * sizeof (qlt_dmem_bctl_t));
281 281 mutex_destroy(&p->dmem_lock);
282 282 }
283 283 kmem_free(qlt->dmem_buckets, sizeof (dmem_buckets) +
284 284 (((sizeof (dmem_buckets)/sizeof (void *))-1)*
285 285 sizeof (qlt_dmem_bucket_t)));
286 286 qlt->dmem_buckets = NULL;
287 287 }
288 288
289 289 stmf_data_buf_t *
290 290 qlt_dmem_alloc(fct_local_port_t *port, uint32_t size, uint32_t *pminsize,
291 291 uint32_t flags)
292 292 {
293 293 return (qlt_i_dmem_alloc((qlt_state_t *)
294 294 port->port_fca_private, size, pminsize,
295 295 flags));
296 296 }
297 297
/* ARGSUSED */
/*
 * Allocate a data buffer from the bucket pools.
 *
 * Phase 1: try a full allocation from the smallest bucket that can hold
 * `size` bytes.  Phase 2 (partial): walk buckets from largest to smallest
 * and hand back a buffer of at least *pminsize.  If nothing fits, *pminsize
 * is updated to the largest currently-available buffer size and NULL is
 * returned so the caller can retry with a smaller minimum.
 */
stmf_data_buf_t *
qlt_i_dmem_alloc(qlt_state_t *qlt, uint32_t size, uint32_t *pminsize,
    uint32_t flags)
{
	qlt_dmem_bucket_t *p;
	qlt_dmem_bctl_t *bctl;
	int i;
	uint32_t size_possible = 0;

	if (size > QLT_DMEM_MAX_BUF_SIZE) {
		goto qlt_try_partial_alloc;
	}

	/* 1st try to do a full allocation */
	for (i = 0; (p = qlt->dmem_buckets[i]) != NULL; i++) {
		if (p->dmem_buf_size >= size) {
			if (p->dmem_nbufs_free) {
				mutex_enter(&p->dmem_lock);
				bctl = p->dmem_bctl_free_list;
				if (bctl == NULL) {
					/* lost the race; try next bucket */
					mutex_exit(&p->dmem_lock);
					continue;
				}
				p->dmem_bctl_free_list =
				    bctl->bctl_next;
				p->dmem_nbufs_free--;
				qlt->qlt_bufref[i]++;	/* stats */
				mutex_exit(&p->dmem_lock);
				bctl->bctl_buf->db_data_size = size;
				return (bctl->bctl_buf);
			} else {
				qlt->qlt_bumpbucket++;	/* stats */
			}
		}
	}

qlt_try_partial_alloc:

	qlt->qlt_pmintry++;

	/* Now go from high to low */
	for (i = QLT_DMEM_NBUCKETS - 1; i >= 0; i--) {
		p = qlt->dmem_buckets[i];
		if (p->dmem_nbufs_free == 0)
			continue;
		/* Remember the biggest size we could offer the caller. */
		if (!size_possible) {
			size_possible = p->dmem_buf_size;
		}
		if (*pminsize > p->dmem_buf_size) {
			/* At this point we know the request is failing. */
			/*
			 * NOTE(review): size_possible was just set non-zero
			 * above for any bucket reaching here, so the else
			 * branch appears unreachable — confirm before
			 * simplifying.
			 */
			if (size_possible) {
				/*
				 * This caller is asking too much. We already
				 * know what we can give, so get out.
				 */
				break;
			} else {
				/*
				 * Lets continue to find out and tell what
				 * we can give.
				 */
				continue;
			}
		}
		mutex_enter(&p->dmem_lock);
		if (*pminsize <= p->dmem_buf_size) {
			bctl = p->dmem_bctl_free_list;
			if (bctl == NULL) {
				/* Someone took it. */
				size_possible = 0;
				mutex_exit(&p->dmem_lock);
				continue;
			}
			p->dmem_bctl_free_list = bctl->bctl_next;
			p->dmem_nbufs_free--;
			mutex_exit(&p->dmem_lock);
			bctl->bctl_buf->db_data_size = p->dmem_buf_size;
			qlt->qlt_pmin_ok++;
			return (bctl->bctl_buf);
		}
	}

	/* Tell the caller the best we could have done right now. */
	*pminsize = size_possible;

	return (NULL);
}
385 385
386 386 /* ARGSUSED */
387 387 void
388 388 qlt_i_dmem_free(qlt_state_t *qlt, stmf_data_buf_t *dbuf)
389 389 {
390 390 qlt_dmem_free(0, dbuf);
391 391 }
392 392
393 393 /* ARGSUSED */
394 394 void
395 395 qlt_dmem_free(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
396 396 {
397 397 qlt_dmem_bctl_t *bctl;
398 398 qlt_dmem_bucket_t *p;
399 399
400 400 ASSERT((dbuf->db_flags & DB_LU_DATA_BUF) == 0);
401 401
402 402 bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
403 403 p = bctl->bctl_bucket;
404 404 mutex_enter(&p->dmem_lock);
405 405 bctl->bctl_next = p->dmem_bctl_free_list;
406 406 p->dmem_bctl_free_list = bctl;
407 407 p->dmem_nbufs_free++;
408 408 mutex_exit(&p->dmem_lock);
409 409 }
410 410
411 411 void
412 412 qlt_dmem_dma_sync(stmf_data_buf_t *dbuf, uint_t sync_type)
413 413 {
414 414 qlt_dmem_bctl_t *bctl;
415 415 qlt_dma_sgl_t *qsgl;
416 416 qlt_dmem_bucket_t *p;
417 417 qlt_dma_handle_t *th;
418 418 int rv;
419 419
420 420 if (dbuf->db_flags & DB_LU_DATA_BUF) {
421 421 /*
422 422 * go through ddi handle list
423 423 */
424 424 qsgl = (qlt_dma_sgl_t *)dbuf->db_port_private;
425 425 th = qsgl->handle_list;
426 426 while (th) {
427 427 rv = ddi_dma_sync(th->dma_handle,
428 428 0, 0, sync_type);
429 429 if (rv != DDI_SUCCESS) {
430 430 cmn_err(CE_WARN, "ddi_dma_sync FAILED\n");
431 431 }
432 432 th = th->next;
433 433 }
434 434 } else {
435 435 bctl = (qlt_dmem_bctl_t *)dbuf->db_port_private;
436 436 p = bctl->bctl_bucket;
437 437 (void) ddi_dma_sync(p->dmem_dma_handle, (off_t)
438 438 (bctl->bctl_dev_addr - p->dmem_dev_addr),
439 439 dbuf->db_data_size, sync_type);
440 440 }
441 441 }
442 442
443 443 /*
444 444 * A very lite version of ddi_dma_addr_bind_handle()
445 445 */
446 446 uint64_t
447 447 qlt_ddi_vtop(caddr_t vaddr)
448 448 {
449 449 uint64_t offset, paddr;
450 450 pfn_t pfn;
451 451
452 452 pfn = hat_getpfnum(kas.a_hat, vaddr);
453 453 ASSERT(pfn != PFN_INVALID && pfn != PFN_SUSPENDED);
454 454 offset = ((uintptr_t)vaddr) & MMU_PAGEOFFSET;
455 455 paddr = mmu_ptob(pfn);
456 456 return (paddr+offset);
457 457 }
458 458
/* DMA attributes for binding caller sglist entries (multi-cookie). */
static ddi_dma_attr_t qlt_sgl_dma_attr = {
	DMA_ATTR_V0,		/* dma_attr_version */
	0,			/* low DMA address range */
	0xffffffffffffffff,	/* high DMA address range */
	0xffffffff,		/* DMA counter register */
	64,			/* DMA address alignment */
	0xff,			/* DMA burstsizes */
	1,			/* min effective DMA size */
	0xffffffff,		/* max DMA xfer size */
	0xffffffff,		/* segment boundary */
	QLT_DMA_SG_LIST_LENGTH,	/* s/g list length */
	1,			/* granularity of device */
	0			/* DMA transfer flags */
};
473 473
474 474 /*
475 475 * Allocate a qlt_dma_handle container and fill it with a ddi_dma_handle
476 476 */
477 477 static qlt_dma_handle_t *
478 478 qlt_dma_alloc_handle(qlt_state_t *qlt)
479 479 {
480 480 ddi_dma_handle_t ddi_handle;
481 481 qlt_dma_handle_t *qlt_handle;
482 482 int rv;
483 483
484 484 rv = ddi_dma_alloc_handle(qlt->dip, &qlt_sgl_dma_attr,
485 485 DDI_DMA_SLEEP, 0, &ddi_handle);
486 486 if (rv != DDI_SUCCESS) {
487 487 EL(qlt, "ddi_dma_alloc_handle status=%xh\n", rv);
488 488 return (NULL);
489 489 }
490 490 qlt_handle = kmem_zalloc(sizeof (qlt_dma_handle_t), KM_SLEEP);
491 491 qlt_handle->dma_handle = ddi_handle;
492 492 return (qlt_handle);
493 493 }
494 494
495 495 /*
496 496 * Allocate a list of qlt_dma_handle containers from the free list
497 497 */
498 498 static qlt_dma_handle_t *
499 499 qlt_dma_alloc_handle_list(qlt_state_t *qlt, int handle_count)
500 500 {
501 501 qlt_dma_handle_pool_t *pool;
502 502 qlt_dma_handle_t *tmp_handle, *first_handle, *last_handle;
503 503 int i;
504 504
505 505 /*
506 506 * Make sure the free list can satisfy the request.
507 507 * Once the free list is primed, it should satisfy most requests.
508 508 * XXX Should there be a limit on pool size?
509 509 */
510 510 pool = qlt->qlt_dma_handle_pool;
511 511 mutex_enter(&pool->pool_lock);
512 512 while (handle_count > pool->num_free) {
513 513 mutex_exit(&pool->pool_lock);
514 514 if ((tmp_handle = qlt_dma_alloc_handle(qlt)) == NULL)
515 515 return (NULL);
516 516 mutex_enter(&pool->pool_lock);
517 517 tmp_handle->next = pool->free_list;
518 518 pool->free_list = tmp_handle;
519 519 pool->num_free++;
520 520 pool->num_total++;
521 521 }
522 522
523 523 /*
524 524 * The free list lock is held and the list is large enough to
525 525 * satisfy this request. Run down the freelist and snip off
526 526 * the number of elements needed for this request.
527 527 */
528 528 first_handle = pool->free_list;
529 529 tmp_handle = first_handle;
530 530 for (i = 0; i < handle_count; i++) {
531 531 last_handle = tmp_handle;
532 532 tmp_handle = tmp_handle->next;
533 533 }
534 534 pool->free_list = tmp_handle;
535 535 pool->num_free -= handle_count;
536 536 mutex_exit(&pool->pool_lock);
537 537 last_handle->next = NULL; /* sanity */
538 538 return (first_handle);
539 539 }
540 540
/*
 * Return a list of qlt_dma_handle containers to the free list.
 * Each bound handle is unbound first so it can be reused directly
 * from the pool.
 */
static void
qlt_dma_free_handles(qlt_state_t *qlt, qlt_dma_handle_t *first_handle)
{
	qlt_dma_handle_pool_t *pool;
	qlt_dma_handle_t *tmp_handle, *last_handle;
	int rv, handle_count;

	/*
	 * Traverse the list and unbind the handles
	 */
	ASSERT(first_handle);
	tmp_handle = first_handle;
	handle_count = 0;
	while (tmp_handle != NULL) {
		last_handle = tmp_handle;
		/*
		 * If the handle is bound, unbind the handle so it can be
		 * reused. It may not be bound if there was a bind failure.
		 */
		if (tmp_handle->num_cookies != 0) {
			rv = ddi_dma_unbind_handle(tmp_handle->dma_handle);
			ASSERT(rv == DDI_SUCCESS);
			/* only clear cookie state if the unbind succeeded */
			if (rv == DDI_SUCCESS) {
				tmp_handle->num_cookies = 0;
				tmp_handle->num_cookies_fetched = 0;
			}
		}
		tmp_handle = tmp_handle->next;
		handle_count++;
	}
	/*
	 * Insert this list into the free list
	 */
	pool = qlt->qlt_dma_handle_pool;
	mutex_enter(&pool->pool_lock);
	last_handle->next = pool->free_list;
	pool->free_list = first_handle;
	pool->num_free += handle_count;
	mutex_exit(&pool->pool_lock);
}
582 584
583 585 /*
584 586 * cookies produced by mapping this dbuf
585 587 */
586 588 uint16_t
587 589 qlt_get_cookie_count(stmf_data_buf_t *dbuf)
588 590 {
589 591 qlt_dma_sgl_t *qsgl = dbuf->db_port_private;
590 592
591 593 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
592 594 return (qsgl->cookie_count);
593 595 }
594 596
595 597 ddi_dma_cookie_t
596 598 *qlt_get_cookie_array(stmf_data_buf_t *dbuf)
597 599 {
598 600 qlt_dma_sgl_t *qsgl = dbuf->db_port_private;
599 601
600 602 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
601 603
602 604 if (qsgl->cookie_prefetched)
603 605 return (&qsgl->cookie[0]);
604 606 else
605 607 return (NULL);
606 608 }
607 609
/*
 * Wrapper around ddi_dma_nextcookie that hides the ddi_dma_handle usage.
 *
 * Two fetch modes:
 *  - prefetched: cookies were copied into qsgl->cookie[] at setup time;
 *    just index with cookie_next_fetch.
 *  - on demand: walk the handle list; the first cookie of each handle was
 *    captured at bind time, subsequent ones come from ddi_dma_nextcookie().
 *    FETCH_DONE (0xbad sentinel) marks exhaustion of the whole list.
 */
void
qlt_ddi_dma_nextcookie(stmf_data_buf_t *dbuf, ddi_dma_cookie_t *cookiep)
{
	qlt_dma_sgl_t *qsgl = dbuf->db_port_private;

	ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);

	if (qsgl->cookie_prefetched) {
		ASSERT(qsgl->cookie_next_fetch < qsgl->cookie_count);
		*cookiep = qsgl->cookie[qsgl->cookie_next_fetch++];
	} else {
		qlt_dma_handle_t *fetch;
		qlt_dma_handle_t *FETCH_DONE = (qlt_dma_handle_t *)0xbad;

		ASSERT(qsgl->handle_list != NULL);
		ASSERT(qsgl->handle_next_fetch != FETCH_DONE);

		fetch = qsgl->handle_next_fetch;
		if (fetch->num_cookies_fetched == 0) {
			/* first cookie was saved at bind time */
			*cookiep = fetch->first_cookie;
		} else {
			ddi_dma_nextcookie(fetch->dma_handle, cookiep);
		}
		/* advance to the next handle once this one is drained */
		if (++fetch->num_cookies_fetched == fetch->num_cookies) {
			if (fetch->next == NULL)
				qsgl->handle_next_fetch = FETCH_DONE;
			else
				qsgl->handle_next_fetch = fetch->next;
		}
	}
}
642 644
/*
 * Set this flag to fetch the DDI dma cookies from the handles here and
 * store them in the port private area of the dbuf. This will allow
 * faster access to the cookies in qlt_xfer_scsi_data() at the expense of
 * an extra copy. If the qlt->req_lock is hot, this may help.
 * Tunable: 0 = fetch cookies on demand (default), non-zero = prefetch.
 */
uint16_t qlt_sgl_prefetch = 0;
650 652
/*ARGSUSED*/
/*
 * Bind an LU-supplied dbuf for DMA: one DDI handle per sglist entry,
 * with optional cookie prefetch into the port-private state.
 *
 * Returns STMF_SUCCESS, or STMF_FAILURE when handles cannot be obtained,
 * a bind fails, or the total cookie count exceeds the HBA s/g limit.
 */
stmf_status_t
qlt_dma_setup_dbuf(fct_local_port_t *port, stmf_data_buf_t *dbuf,
    uint32_t flags)
{
	qlt_state_t *qlt = port->port_fca_private;
	qlt_dma_sgl_t *qsgl;
	struct stmf_sglist_ent *sglp;
	qlt_dma_handle_t *handle_list, *th;
	int i, rv;
	ddi_dma_cookie_t *cookie_p;
	int numbufs;
	uint16_t cookie_count;
	uint16_t prefetch;
	size_t qsize;

	/*
	 * pseudo code:
	 * get dma handle list from cache - one per sglist entry
	 * foreach sglist entry
	 *	bind dma handle to sglist vaddr
	 * allocate space for DMA state to store in db_port_private
	 * fill in port private object
	 * if prefetching
	 *	move all dma cookies into db_port_private
	 */
	dbuf->db_port_private = NULL;
	numbufs = dbuf->db_sglist_length;
	handle_list = qlt_dma_alloc_handle_list(qlt, numbufs);
	if (handle_list == NULL) {
		EL(qlt, "handle_list==NULL\n");
		return (STMF_FAILURE);
	}
	/*
	 * Loop through sglist and bind each entry to a handle
	 */
	th = handle_list;
	sglp = &dbuf->db_sglist[0];
	cookie_count = 0;
	for (i = 0; i < numbufs; i++, sglp++) {

		/*
		 * Bind this sgl entry to a DDI dma handle
		 */
		if ((rv = ddi_dma_addr_bind_handle(
		    th->dma_handle,
		    NULL,
		    (caddr_t)(sglp->seg_addr),
		    (size_t)sglp->seg_length,
		    DDI_DMA_RDWR | DDI_DMA_STREAMING,
		    DDI_DMA_DONTWAIT,
		    NULL,
		    &th->first_cookie,
		    &th->num_cookies)) != DDI_DMA_MAPPED) {
			cmn_err(CE_NOTE, "ddi_dma_addr_bind_handle %d", rv);
			/* frees (and unbinds any already-bound) handles */
			qlt_dma_free_handles(qlt, handle_list);
			return (STMF_FAILURE);
		}

		/*
		 * Add to total cookie count
		 */
		cookie_count += th->num_cookies;
		if (cookie_count > QLT_DMA_SG_LIST_LENGTH) {
			/*
			 * Request exceeds HBA limit
			 */
			qlt_dma_free_handles(qlt, handle_list);
			return (STMF_FAILURE);
		}
		/* move to next ddi_dma_handle */
		th = th->next;
	}

	/*
	 * Allocate our port private object for DMA mapping state.
	 */
	prefetch = qlt_sgl_prefetch;
	qsize = sizeof (qlt_dma_sgl_t);
	if (prefetch) {
		/* one extra ddi_dma_cookie allocated for alignment padding */
		/*
		 * NOTE(review): the "extra" slot presumably comes from the
		 * cookie array member already inside qlt_dma_sgl_t — confirm
		 * against the struct definition in qlt_dma.h.
		 */
		qsize += cookie_count * sizeof (ddi_dma_cookie_t);
	}
	qsgl = kmem_alloc(qsize, KM_SLEEP);
	/*
	 * Fill in the sgl
	 */
	dbuf->db_port_private = qsgl;
	qsgl->qsize = qsize;
	qsgl->handle_count = dbuf->db_sglist_length;
	qsgl->cookie_prefetched = prefetch;
	qsgl->cookie_count = cookie_count;
	qsgl->cookie_next_fetch = 0;
	qsgl->handle_list = handle_list;
	qsgl->handle_next_fetch = handle_list;
	if (prefetch) {
		/*
		 * traverse handle list and move cookies to db_port_private
		 */
		th = handle_list;
		cookie_p = &qsgl->cookie[0];
		for (i = 0; i < numbufs; i++) {
			uint_t cc = th->num_cookies;

			*cookie_p++ = th->first_cookie;
			while (--cc > 0) {
				ddi_dma_nextcookie(th->dma_handle, cookie_p++);
			}
			/* mark this handle fully drained */
			th->num_cookies_fetched = th->num_cookies;
			th = th->next;
		}
	}

	return (STMF_SUCCESS);
}
765 768
766 769 void
767 770 qlt_dma_teardown_dbuf(fct_dbuf_store_t *fds, stmf_data_buf_t *dbuf)
768 771 {
769 772 qlt_state_t *qlt = fds->fds_fca_private;
770 773 qlt_dma_sgl_t *qsgl = dbuf->db_port_private;
771 774
772 775 ASSERT(qlt);
773 776 ASSERT(qsgl);
774 777 ASSERT(dbuf->db_flags & DB_LU_DATA_BUF);
775 778
776 779 /*
777 780 * unbind and free the dma handles
778 781 */
779 782 if (qsgl->handle_list) {
780 783 /* go through ddi handle list */
781 784 qlt_dma_free_handles(qlt, qsgl->handle_list);
782 785 }
783 786 kmem_free(qsgl, qsgl->qsize);
784 787 }
785 788
786 789 uint8_t
787 790 qlt_get_iocb_count(uint32_t cookie_count)
788 791 {
789 792 uint32_t cnt, cont_segs;
790 793 uint8_t iocb_count;
791 794
792 795 iocb_count = 1;
793 796 cnt = CMD7_2400_DATA_SEGMENTS;
794 797 cont_segs = CONT_A64_DATA_SEGMENTS;
795 798
796 799 if (cookie_count > cnt) {
797 800 cnt = cookie_count - cnt;
798 801 iocb_count = (uint8_t)(iocb_count + cnt / cont_segs);
799 802 if (cnt % cont_segs) {
800 803 iocb_count++;
801 804 }
802 805 }
803 806 return (iocb_count);
804 807 }