Print this page
MFV: illumos-omnios@aea0472ecb9ee91fa70556d6f6a941c10c989f1d
Add support for Emulex Corporation Lancer Gen6: LPe32000 FC Host Adapter
Author: Andy Fiddaman <omnios@citrus-it.co.uk>
NEX-1878 update emlxs from source provided by Emulex
| Split |
Close |
| Expand all |
| Collapse all |
--- old/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_mem.c
+++ new/usr/src/uts/common/io/fibre-channel/fca/emlxs/emlxs_mem.c
1 1 /*
2 2 * CDDL HEADER START
3 3 *
4 4 * The contents of this file are subject to the terms of the
5 5 * Common Development and Distribution License (the "License").
6 6 * You may not use this file except in compliance with the License.
7 7 *
8 8 * You can obtain a copy of the license at
9 9 * http://www.opensource.org/licenses/cddl1.txt.
10 10 * See the License for the specific language governing permissions
11 11 * and limitations under the License.
12 12 *
13 13 * When distributing Covered Code, include this CDDL HEADER in each
14 14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
|
↓ open down ↓ |
14 lines elided |
↑ open up ↑ |
15 15 * If applicable, add the following below this CDDL HEADER, with the
16 16 * fields enclosed by brackets "[]" replaced with your own identifying
17 17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 18 *
19 19 * CDDL HEADER END
20 20 */
21 21
22 22 /*
23 23 * Copyright (c) 2004-2011 Emulex. All rights reserved.
24 24 * Use is subject to license terms.
25 + * Copyright 2018 OmniOS Community Edition (OmniOSce) Association.
25 26 */
26 27
27 28 #include <emlxs.h>
28 29
29 30 /* #define EMLXS_POOL_DEBUG */
30 31
31 32 EMLXS_MSG_DEF(EMLXS_MEM_C);
32 33
33 34
34 35 static uint32_t emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg,
35 36 uint32_t count);
36 37 static void emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count);
37 38
38 39
39 40 extern int32_t
40 41 emlxs_mem_alloc_buffer(emlxs_hba_t *hba)
41 42 {
42 43 emlxs_port_t *port = &PPORT;
43 44 emlxs_config_t *cfg;
44 45 MBUF_INFO *buf_info;
45 46 MEMSEG *seg;
46 47 MBUF_INFO bufinfo;
47 48 int32_t i;
48 49 MATCHMAP *mp;
49 50 MATCHMAP **bpl_table;
50 51
51 52 buf_info = &bufinfo;
52 53 cfg = &CFG;
53 54
54 55 bzero(hba->memseg, sizeof (hba->memseg));
55 56
56 57 /* Allocate the fc_table */
57 58 bzero(buf_info, sizeof (MBUF_INFO));
58 59 buf_info->size = (hba->max_iotag * sizeof (emlxs_buf_t *));
59 60
60 61 (void) emlxs_mem_alloc(hba, buf_info);
61 62 if (buf_info->virt == NULL) {
62 63
63 64 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
64 65 "fc_table buffer.");
65 66
66 67 goto failed;
67 68 }
68 69 hba->fc_table = buf_info->virt;
69 70 bzero(hba->fc_table, buf_info->size);
70 71
71 72 /* Prepare the memory pools */
72 73 for (i = 0; i < FC_MAX_SEG; i++) {
73 74 seg = &hba->memseg[i];
74 75
75 76 switch (i) {
76 77 case MEM_NLP:
77 78 (void) strlcpy(seg->fc_label, "Node Pool",
78 79 sizeof (seg->fc_label));
79 80 seg->fc_memtag = MEM_NLP;
80 81 seg->fc_memsize = sizeof (NODELIST);
81 82 seg->fc_hi_water = hba->max_nodes + 2;
82 83 seg->fc_lo_water = 2;
83 84 seg->fc_step = 1;
84 85 break;
85 86
86 87 case MEM_IOCB:
87 88 (void) strlcpy(seg->fc_label, "IOCB Pool",
88 89 sizeof (seg->fc_label));
89 90 seg->fc_memtag = MEM_IOCB;
90 91 seg->fc_memsize = sizeof (IOCBQ);
91 92 seg->fc_hi_water = cfg[CFG_NUM_IOCBS].current;
92 93 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
93 94 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
94 95 break;
95 96
96 97 case MEM_MBOX:
97 98 (void) strlcpy(seg->fc_label, "MBOX Pool",
98 99 sizeof (seg->fc_label));
99 100 seg->fc_memtag = MEM_MBOX;
100 101 seg->fc_memsize = sizeof (MAILBOXQ);
101 102 seg->fc_hi_water = hba->max_nodes + 32;
102 103 seg->fc_lo_water = 32;
103 104 seg->fc_step = 1;
104 105 break;
105 106
106 107 case MEM_BPL:
107 108 if (hba->model_info.sli_mask & EMLXS_SLI4_MASK) {
108 109 continue;
109 110 }
110 111 (void) strlcpy(seg->fc_label, "BPL Pool",
111 112 sizeof (seg->fc_label));
112 113 seg->fc_memtag = MEM_BPL;
113 114 seg->fc_memsize = hba->sli.sli3.mem_bpl_size;
114 115 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
115 116 seg->fc_memalign = 32;
116 117 seg->fc_hi_water = hba->max_iotag;
117 118 seg->fc_lo_water = cfg[CFG_NUM_IOCBS].low;
118 119 seg->fc_step = cfg[CFG_NUM_IOCBS].low;
119 120 break;
120 121
121 122 case MEM_BUF:
122 123 /* These are the unsolicited ELS buffers. */
123 124 (void) strlcpy(seg->fc_label, "BUF Pool",
124 125 sizeof (seg->fc_label));
125 126 seg->fc_memtag = MEM_BUF;
126 127 seg->fc_memsize = MEM_BUF_SIZE;
127 128 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
128 129 seg->fc_memalign = 32;
129 130 seg->fc_hi_water = MEM_ELSBUF_COUNT + MEM_BUF_COUNT;
130 131 seg->fc_lo_water = MEM_ELSBUF_COUNT;
131 132 seg->fc_step = 1;
132 133 break;
133 134
134 135 case MEM_IPBUF:
135 136 /* These are the unsolicited IP buffers. */
136 137 if (cfg[CFG_NETWORK_ON].current == 0) {
137 138 continue;
138 139 }
139 140
140 141 (void) strlcpy(seg->fc_label, "IPBUF Pool",
141 142 sizeof (seg->fc_label));
142 143 seg->fc_memtag = MEM_IPBUF;
143 144 seg->fc_memsize = MEM_IPBUF_SIZE;
144 145 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
145 146 seg->fc_memalign = 32;
146 147 seg->fc_hi_water = MEM_IPBUF_COUNT;
147 148 seg->fc_lo_water = 0;
148 149 seg->fc_step = 4;
149 150 break;
150 151
151 152 case MEM_CTBUF:
152 153 /* These are the unsolicited CT buffers. */
153 154 (void) strlcpy(seg->fc_label, "CTBUF Pool",
|
↓ open down ↓ |
119 lines elided |
↑ open up ↑ |
154 155 sizeof (seg->fc_label));
155 156 seg->fc_memtag = MEM_CTBUF;
156 157 seg->fc_memsize = MEM_CTBUF_SIZE;
157 158 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
158 159 seg->fc_memalign = 32;
159 160 seg->fc_hi_water = MEM_CTBUF_COUNT;
160 161 seg->fc_lo_water = MEM_CTBUF_COUNT;
161 162 seg->fc_step = 1;
162 163 break;
163 164
165 + case MEM_SGL1K:
166 + (void) strlcpy(seg->fc_label, "1K SGL Pool",
167 + sizeof (seg->fc_label));
168 + seg->fc_memtag = MEM_SGL1K;
169 + seg->fc_memsize = 0x400;
170 + seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
171 + seg->fc_memalign = 32;
172 + seg->fc_hi_water = 0x5000;
173 + seg->fc_lo_water = 0;
174 + seg->fc_step = 0x100;
175 + break;
176 +
177 + case MEM_SGL2K:
178 + (void) strlcpy(seg->fc_label, "2K SGL Pool",
179 + sizeof (seg->fc_label));
180 + seg->fc_memtag = MEM_SGL2K;
181 + seg->fc_memsize = 0x800;
182 + seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
183 + seg->fc_memalign = 32;
184 + seg->fc_hi_water = 0x5000;
185 + seg->fc_lo_water = 0;
186 + seg->fc_step = 0x100;
187 + break;
188 +
189 + case MEM_SGL4K:
190 + (void) strlcpy(seg->fc_label, "4K SGL Pool",
191 + sizeof (seg->fc_label));
192 + seg->fc_memtag = MEM_SGL4K;
193 + seg->fc_memsize = 0x1000;
194 + seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
195 + seg->fc_memalign = 32;
196 + seg->fc_hi_water = 0x5000;
197 + seg->fc_lo_water = 0;
198 + seg->fc_step = 0x100;
199 + break;
200 +
164 201 #ifdef SFCT_SUPPORT
165 202 case MEM_FCTBUF:
166 203 /* These are the unsolicited FCT buffers. */
167 204 if (!(port->flag & EMLXS_TGT_ENABLED)) {
168 205 continue;
169 206 }
170 207
171 208 (void) strlcpy(seg->fc_label, "FCTBUF Pool",
172 209 sizeof (seg->fc_label));
173 210 seg->fc_memtag = MEM_FCTBUF;
174 211 seg->fc_memsize = MEM_FCTBUF_SIZE;
175 212 seg->fc_memflag = FC_MBUF_DMA | FC_MBUF_SNGLSG;
176 213 seg->fc_memalign = 32;
177 214 seg->fc_hi_water = MEM_FCTBUF_COUNT;
178 215 seg->fc_lo_water = 0;
179 216 seg->fc_step = 8;
180 217 break;
181 218 #endif /* SFCT_SUPPORT */
182 219
183 220 default:
184 221 continue;
185 222 }
186 223
187 224 if (seg->fc_memsize == 0) {
188 225 continue;
189 226 }
190 227
191 228 (void) emlxs_mem_pool_create(hba, seg);
192 229
193 230 if (seg->fc_numblks < seg->fc_lo_water) {
194 231 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
195 232 "%s: count=%d size=%d flags=%x lo=%d hi=%d",
196 233 seg->fc_label, seg->fc_numblks,
197 234 seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
198 235 seg->fc_hi_water);
199 236
200 237 goto failed;
201 238 }
202 239 }
203 240
204 241 hba->sli.sli3.bpl_table = NULL;
205 242 seg = &hba->memseg[MEM_BPL];
206 243
207 244 /* If SLI3 and MEM_BPL pool is static */
208 245 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK) &&
209 246 !(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
210 247 /*
211 248 * Allocate and Initialize bpl_table
212 249 * This is for increased performance.
213 250 */
214 251 bzero(buf_info, sizeof (MBUF_INFO));
215 252 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
216 253
217 254 (void) emlxs_mem_alloc(hba, buf_info);
218 255 if (buf_info->virt == NULL) {
219 256
220 257 EMLXS_MSGF(EMLXS_CONTEXT,
221 258 &emlxs_mem_alloc_failed_msg,
222 259 "BPL table buffer.");
223 260
224 261 goto failed;
225 262 }
226 263 hba->sli.sli3.bpl_table = buf_info->virt;
227 264
228 265 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
229 266 for (i = 0; i < hba->max_iotag; i++) {
230 267 mp = (MATCHMAP *) emlxs_mem_get(hba, MEM_BPL);
231 268 mp->flag |= MAP_TABLE_ALLOCATED;
232 269 bpl_table[i] = mp;
233 270 }
234 271 }
235 272
236 273 return (1);
237 274
238 275 failed:
239 276
240 277 (void) emlxs_mem_free_buffer(hba);
241 278 return (0);
242 279
243 280 } /* emlxs_mem_alloc_buffer() */
244 281
245 282
246 283 /*
247 284 * emlxs_mem_free_buffer
248 285 *
249 286 * This routine will free iocb/data buffer space
250 287 * and TGTM resource.
251 288 */
252 289 extern int
253 290 emlxs_mem_free_buffer(emlxs_hba_t *hba)
254 291 {
255 292 emlxs_port_t *port = &PPORT;
256 293 emlxs_port_t *vport;
257 294 int32_t j;
258 295 MATCHMAP *mp;
259 296 CHANNEL *cp;
260 297 RING *rp;
261 298 MBUF_INFO *buf_info;
262 299 MBUF_INFO bufinfo;
263 300 MATCHMAP **bpl_table;
264 301
265 302 buf_info = &bufinfo;
266 303
267 304 for (j = 0; j < hba->chan_count; j++) {
268 305 cp = &hba->chan[j];
269 306
270 307 /* Flush the ring */
271 308 (void) emlxs_tx_channel_flush(hba, cp, 0);
272 309 }
273 310
274 311 if (!(hba->model_info.sli_mask & EMLXS_SLI4_MASK)) {
275 312 /* free the mapped address match area for each ring */
276 313 for (j = 0; j < MAX_RINGS; j++) {
277 314 rp = &hba->sli.sli3.ring[j];
278 315
279 316 while (rp->fc_mpoff) {
280 317 uint64_t addr;
281 318
282 319 addr = 0;
283 320 mp = (MATCHMAP *)(rp->fc_mpoff);
284 321
285 322 if ((j == hba->channel_els) ||
286 323 (j == hba->channel_ct) ||
287 324 #ifdef SFCT_SUPPORT
288 325 (j == hba->CHANNEL_FCT) ||
289 326 #endif /* SFCT_SUPPORT */
290 327 (j == hba->channel_ip)) {
291 328 addr = mp->phys;
292 329 }
293 330
294 331 if ((mp = emlxs_mem_get_vaddr(hba, rp, addr))) {
295 332 if (j == hba->channel_els) {
296 333 emlxs_mem_put(hba,
297 334 MEM_ELSBUF, (void *)mp);
298 335 } else if (j == hba->channel_ct) {
299 336 emlxs_mem_put(hba,
300 337 MEM_CTBUF, (void *)mp);
301 338 } else if (j == hba->channel_ip) {
302 339 emlxs_mem_put(hba,
303 340 MEM_IPBUF, (void *)mp);
304 341 }
305 342 #ifdef SFCT_SUPPORT
306 343 else if (j == hba->CHANNEL_FCT) {
307 344 emlxs_mem_put(hba,
308 345 MEM_FCTBUF, (void *)mp);
309 346 }
310 347 #endif /* SFCT_SUPPORT */
311 348
312 349 }
313 350 }
314 351 }
315 352 }
316 353
317 354 if (hba->flag & FC_HBQ_ENABLED) {
318 355 emlxs_hbq_free_all(hba, EMLXS_ELS_HBQ_ID);
319 356 emlxs_hbq_free_all(hba, EMLXS_IP_HBQ_ID);
320 357 emlxs_hbq_free_all(hba, EMLXS_CT_HBQ_ID);
321 358
322 359 if (port->flag & EMLXS_TGT_ENABLED) {
323 360 emlxs_hbq_free_all(hba, EMLXS_FCT_HBQ_ID);
324 361 }
325 362 }
326 363
327 364 /* Free the nodes */
328 365 for (j = 0; j < MAX_VPORTS; j++) {
329 366 vport = &VPORT(j);
330 367 if (vport->node_count) {
331 368 emlxs_node_destroy_all(vport);
332 369 }
333 370 }
334 371
335 372 /* Make sure the mailbox queue is empty */
336 373 emlxs_mb_flush(hba);
337 374
338 375 if (hba->fc_table) {
339 376 bzero(buf_info, sizeof (MBUF_INFO));
340 377 buf_info->size = hba->max_iotag * sizeof (emlxs_buf_t *);
341 378 buf_info->virt = hba->fc_table;
342 379 emlxs_mem_free(hba, buf_info);
343 380 hba->fc_table = NULL;
344 381 }
345 382
346 383 if (hba->sli.sli3.bpl_table) {
347 384 /* Return MEM_BPLs to their pool */
348 385 bpl_table = (MATCHMAP**)hba->sli.sli3.bpl_table;
349 386 for (j = 0; j < hba->max_iotag; j++) {
350 387 mp = bpl_table[j];
351 388 mp->flag &= ~MAP_TABLE_ALLOCATED;
352 389 emlxs_mem_put(hba, MEM_BPL, (void*)mp);
353 390 }
354 391
355 392 bzero(buf_info, sizeof (MBUF_INFO));
356 393 buf_info->size = hba->max_iotag * sizeof (MATCHMAP *);
357 394 buf_info->virt = hba->sli.sli3.bpl_table;
358 395 emlxs_mem_free(hba, buf_info);
359 396 hba->sli.sli3.bpl_table = NULL;
360 397 }
361 398
362 399 /* Free the memory segments */
363 400 for (j = 0; j < FC_MAX_SEG; j++) {
364 401 emlxs_mem_pool_destroy(hba, &hba->memseg[j]);
365 402 }
366 403
367 404 return (0);
368 405
369 406 } /* emlxs_mem_free_buffer() */
370 407
371 408
/*
 * emlxs_mem_pool_alloc
 *
 * Grows pool <seg> by up to <count> objects, capped so the pool never
 * exceeds fc_hi_water.  DMA pools (FC_MBUF_DMA) allocate a MATCHMAP
 * descriptor plus a DMA buffer per object; virtual pools allocate raw
 * memory only.  New objects are appended to the tail of the memget
 * freelist.
 *
 * Returns the number of objects actually added (may be fewer than
 * <count> if an allocation fails partway).
 *
 * Must hold EMLXS_MEMGET_LOCK when calling.
 */
static uint32_t
emlxs_mem_pool_alloc(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;
	uint32_t i;
	uint32_t fc_numblks;

	/* Nothing to do for an unconfigured pool */
	if (seg->fc_memsize == 0) {
		return (0);
	}

	/* Pool is already at its high water mark */
	if (seg->fc_numblks >= seg->fc_hi_water) {
		return (0);
	}

	if (count == 0) {
		return (0);
	}

	/* Clamp the request so the pool never exceeds fc_hi_water */
	if (count > (seg->fc_hi_water - seg->fc_numblks)) {
		count = (seg->fc_hi_water - seg->fc_numblks);
	}

	buf_info = &local_buf_info;
	/* Remember the starting count; the return value is the delta */
	fc_numblks = seg->fc_numblks;

	/* Check for initial allocation */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s alloc:%d n=%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

	/* dma_pool */

	for (i = 0; i < count; i++) {
		/* First allocate the MATCHMAP descriptor object */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->align = sizeof (void *);

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}

		mp = (MATCHMAP *)buf_info->virt;
		bzero(mp, sizeof (MATCHMAP));

		/* Then allocate the DMA buffer the descriptor tracks */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;
		buf_info->flags = seg->fc_memflag;
		buf_info->align = seg->fc_memalign;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			/* Free the mp object */
			bzero(buf_info, sizeof (MBUF_INFO));
			buf_info->size = sizeof (MATCHMAP);
			buf_info->virt = (void *)mp;
			emlxs_mem_free(hba, buf_info);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;
		bzero(bp, seg->fc_memsize);

		/* Record the DMA buffer details in the descriptor */
		mp->virt = buf_info->virt;
		mp->phys = buf_info->phys;
		mp->size = buf_info->size;
		mp->dma_handle = buf_info->dma_handle;
		mp->data_handle = buf_info->data_handle;
		mp->tag = seg->fc_memtag;
		mp->segment = seg;
		mp->flag |= MAP_POOL_ALLOCATED;

#ifdef SFCT_SUPPORT
		if (mp->tag >= MEM_FCTSEG) {
			if (emlxs_fct_stmf_alloc(hba, mp)) {
				/* Free the DMA memory itself */
				emlxs_mem_free(hba, buf_info);

				/* Free the mp object */
				bzero(buf_info, sizeof (MBUF_INFO));
				buf_info->size = sizeof (MATCHMAP);
				buf_info->virt = (void *)mp;
				emlxs_mem_free(hba, buf_info);

				goto done;
			}
		}
#endif /* SFCT_SUPPORT */

		/* Add the buffer desc to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)mp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)mp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)mp;

		seg->fc_numblks++;
		seg->fc_total_memsize += (seg->fc_memsize + sizeof (MATCHMAP));
	}

	goto done;

vmem_pool:

	for (i = 0; i < count; i++) {
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;

		(void) emlxs_mem_alloc(hba, buf_info);
		if (buf_info->virt == NULL) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
			    "%s: count=%d size=%d",
			    seg->fc_label, seg->fc_numblks, seg->fc_memsize);

			goto done;
		}
		bp = (uint8_t *)buf_info->virt;

		/* Add the buffer to the tail of the pool freelist */
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = (uint8_t *)bp;
			seg->fc_memget_cnt = 1;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) = (uint8_t *)bp;
			seg->fc_memget_cnt++;
		}
		seg->fc_memget_end = (uint8_t *)bp;

		seg->fc_numblks++;
		seg->fc_total_memsize += seg->fc_memsize;
	}

done:

	/* Return the number of objects added in this call */
	return ((seg->fc_numblks - fc_numblks));

} /* emlxs_mem_pool_alloc() */
536 573
537 574
/*
 * emlxs_mem_pool_free
 *
 * Shrinks pool <seg> by releasing up to <count> free objects.  The
 * memput list is first merged onto the memget list so all free
 * objects are visible; objects are then freed from the head of the
 * memget list.  Only free objects can be released, so the pool may
 * shrink by fewer than <count>.
 *
 * Must hold EMLXS_MEMGET_LOCK & EMLXS_MEMPUT_LOCK when calling.
 */
static void
emlxs_mem_pool_free(emlxs_hba_t *hba, MEMSEG *seg, uint32_t count)
{
	emlxs_port_t *port = &PPORT;
	uint8_t *bp = NULL;
	MATCHMAP *mp = NULL;
	MBUF_INFO *buf_info;
	MBUF_INFO local_buf_info;

	if ((seg->fc_memsize == 0) ||
	    (seg->fc_numblks == 0) ||
	    (count == 0)) {
		return;
	}

	/* Check max count */
	if (count > seg->fc_numblks) {
		count = seg->fc_numblks;
	}

	/* Move memput list to memget list */
	if (seg->fc_memput_ptr) {
		if (seg->fc_memget_end == NULL) {
			seg->fc_memget_ptr = seg->fc_memput_ptr;
		} else {
			*((uint8_t **)(seg->fc_memget_end)) =\
			    seg->fc_memput_ptr;
		}
		seg->fc_memget_end = seg->fc_memput_end;
		seg->fc_memget_cnt += seg->fc_memput_cnt;

		seg->fc_memput_ptr = NULL;
		seg->fc_memput_end = NULL;
		seg->fc_memput_cnt = 0;
	}

	buf_info = &local_buf_info;

	/* Check for final deallocation */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s free:%d n=%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, count, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);
	}

	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		goto vmem_pool;
	}

/* DMA pools fall through to here; the label documents the path */
dma_pool:

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}
		mp = (MATCHMAP *)bp;

#ifdef SFCT_SUPPORT
		if (mp->tag >= MEM_FCTSEG) {
			emlxs_fct_stmf_free(hba, mp);
		}
#endif /* SFCT_SUPPORT */

		/* Free the DMA memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = mp->size;
		buf_info->virt = mp->virt;
		buf_info->phys = mp->phys;
		buf_info->dma_handle = mp->dma_handle;
		buf_info->data_handle = mp->data_handle;
		buf_info->flags = seg->fc_memflag;
		emlxs_mem_free(hba, buf_info);

		/* Free the handle */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = sizeof (MATCHMAP);
		buf_info->virt = (void *)mp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= (seg->fc_memsize + sizeof (MATCHMAP));

		count--;
	}

	return;

vmem_pool:

	/* Free memory associated with all buffers on get buffer pool */
	while (count && ((bp = seg->fc_memget_ptr) != NULL)) {
		/* Remove buffer from list */
		if (seg->fc_memget_end == bp) {
			seg->fc_memget_ptr = NULL;
			seg->fc_memget_end = NULL;
			seg->fc_memget_cnt = 0;

		} else {
			seg->fc_memget_ptr = *((uint8_t **)bp);
			seg->fc_memget_cnt--;
		}

		/* Free the Virtual memory itself */
		bzero(buf_info, sizeof (MBUF_INFO));
		buf_info->size = seg->fc_memsize;
		buf_info->virt = bp;
		emlxs_mem_free(hba, buf_info);

		seg->fc_numblks--;
		seg->fc_total_memsize -= seg->fc_memsize;

		count--;
	}

	return;

} /* emlxs_mem_pool_free() */
668 705
669 706
670 707 extern uint32_t
671 708 emlxs_mem_pool_create(emlxs_hba_t *hba, MEMSEG *seg)
672 709 {
673 710 emlxs_config_t *cfg = &CFG;
674 711
675 712 mutex_enter(&EMLXS_MEMGET_LOCK);
676 713 mutex_enter(&EMLXS_MEMPUT_LOCK);
677 714
678 715 if (seg->fc_memsize == 0) {
679 716 mutex_exit(&EMLXS_MEMPUT_LOCK);
680 717 mutex_exit(&EMLXS_MEMGET_LOCK);
681 718
682 719 return (0);
683 720 }
684 721
685 722 /* Sanity check hi > lo */
686 723 if (seg->fc_lo_water > seg->fc_hi_water) {
687 724 seg->fc_hi_water = seg->fc_lo_water;
688 725 }
689 726
690 727 /* If dynamic pools are disabled, then force pool to max level */
691 728 if (cfg[CFG_MEM_DYNAMIC].current == 0) {
692 729 seg->fc_lo_water = seg->fc_hi_water;
693 730 }
694 731
695 732 /* If pool is dynamic, then fc_step must be >0 */
696 733 /* Otherwise, fc_step must be 0 */
697 734 if (seg->fc_lo_water != seg->fc_hi_water) {
698 735 seg->fc_memflag |= FC_MEMSEG_DYNAMIC;
699 736
700 737 if (seg->fc_step == 0) {
|
↓ open down ↓ |
527 lines elided |
↑ open up ↑ |
701 738 seg->fc_step = 1;
702 739 }
703 740 } else {
704 741 seg->fc_step = 0;
705 742 }
706 743
707 744 seg->fc_numblks = 0;
708 745 seg->fc_total_memsize = 0;
709 746 seg->fc_low = 0;
710 747
711 - (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
748 + (void) emlxs_mem_pool_alloc(hba, seg, seg->fc_lo_water);
712 749
713 750 seg->fc_memflag |= (FC_MEMSEG_PUT_ENABLED|FC_MEMSEG_GET_ENABLED);
714 751
715 752 mutex_exit(&EMLXS_MEMPUT_LOCK);
716 753 mutex_exit(&EMLXS_MEMGET_LOCK);
717 754
718 755 return (seg->fc_numblks);
719 756
720 757 } /* emlxs_mem_pool_create() */
721 758
722 759
723 760 extern void
724 761 emlxs_mem_pool_destroy(emlxs_hba_t *hba, MEMSEG *seg)
725 762 {
726 763 emlxs_port_t *port = &PPORT;
727 764
728 765 mutex_enter(&EMLXS_MEMGET_LOCK);
729 766 mutex_enter(&EMLXS_MEMPUT_LOCK);
730 767
731 768 if (seg->fc_memsize == 0) {
732 769 mutex_exit(&EMLXS_MEMPUT_LOCK);
733 770 mutex_exit(&EMLXS_MEMGET_LOCK);
734 771 return;
735 772 }
736 773
737 774 /* Leave FC_MEMSEG_PUT_ENABLED set for now */
738 775 seg->fc_memflag &= ~FC_MEMSEG_GET_ENABLED;
739 776
740 777 /* Try to free all objects */
741 778 emlxs_mem_pool_free(hba, seg, seg->fc_numblks);
742 779
743 780 if (seg->fc_numblks) {
744 781 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
745 782 "mem_pool_destroy: %s leak detected: "
746 783 "%d objects still allocated.",
747 784 seg->fc_label, seg->fc_numblks);
748 785 } else {
749 786 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
750 787 "mem_pool_destroy: %s destroyed.",
751 788 seg->fc_label);
752 789
753 790 /* Clear all */
754 791 bzero(seg, sizeof (MEMSEG));
755 792 }
756 793
757 794 mutex_exit(&EMLXS_MEMPUT_LOCK);
758 795 mutex_exit(&EMLXS_MEMGET_LOCK);
759 796
760 797 return;
761 798
762 799 } /* emlxs_mem_pool_destroy() */
763 800
764 801
/*
 * emlxs_mem_pool_clean
 *
 * Periodically trims a dynamic pool back toward its low water mark.
 * fc_low tracks the minimum free count observed since the last clean;
 * that amount (minus a ~3% pad) is considered excess and is released.
 * Static pools are left untouched.
 */
extern void
emlxs_mem_pool_clean(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t *port = &PPORT;
	uint32_t clean_count;
	uint32_t free_count;
	uint32_t free_pad;

	mutex_enter(&EMLXS_MEMGET_LOCK);
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Only dynamic pools are ever cleaned */
	if (!(seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return;
	}

	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		goto done;
	}

#ifdef EMLXS_POOL_DEBUG
	EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
	    "%s clean: n=%d s=%d f=%x l=%d,%d,%d "
	    "f=%d:%d",
	    seg->fc_label, seg->fc_numblks,
	    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
	    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
	    seg->fc_low);
#endif /* EMLXS_POOL_DEBUG */

	/* Calculate current free count */
	free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);

	/* Reset fc_low value to current free count */
	clean_count = seg->fc_low;
	seg->fc_low = free_count;

	/* Return if pool is already at lo water mark */
	if (seg->fc_numblks <= seg->fc_lo_water) {
		goto done;
	}

	/* Return if there is nothing to clean */
	if ((free_count == 0) ||
	    (clean_count <= 1)) {
		goto done;
	}

	/* Calculate a 3 percent free pad count (1 being minimum) */
	if (seg->fc_numblks > 66) {
		free_pad = ((seg->fc_numblks * 3)/100);
	} else {
		free_pad = 1;
	}

	/* Return if fc_low is below pool free pad */
	if (clean_count <= free_pad) {
		goto done;
	}

	clean_count -= free_pad;

	/* clean_count can't exceed minimum pool levels */
	if (clean_count > (seg->fc_numblks - seg->fc_lo_water)) {
		clean_count = (seg->fc_numblks - seg->fc_lo_water);
	}

	emlxs_mem_pool_free(hba, seg, clean_count);

done:
	/* Log only when the pool size actually changed */
	if (seg->fc_last != seg->fc_numblks) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_detail_msg,
		    "%s update: n=%d->%d s=%d f=%x l=%d,%d,%d "
		    "f=%d:%d",
		    seg->fc_label, seg->fc_last, seg->fc_numblks,
		    seg->fc_memsize, seg->fc_memflag, seg->fc_lo_water,
		    seg->fc_hi_water, seg->fc_step, seg->fc_memget_cnt,
		    seg->fc_low);

		seg->fc_last = seg->fc_numblks;
	}

	mutex_exit(&EMLXS_MEMPUT_LOCK);
	mutex_exit(&EMLXS_MEMGET_LOCK);
	return;

} /* emlxs_mem_pool_clean() */
853 890
854 891
/*
 * emlxs_mem_pool_get
 *
 * Removes and returns one object from the head of segment <seg>'s
 * memget freelist.  If the memget list is empty, the memput list is
 * migrated over first; if still empty and the pool is dynamic, the
 * pool is grown by fc_step.  Returns NULL if the pool is disabled or
 * exhausted.
 */
extern void *
emlxs_mem_pool_get(emlxs_hba_t *hba, MEMSEG *seg)
{
	emlxs_port_t *port = &PPORT;
	void *bp = NULL;
	MATCHMAP *mp;
	uint32_t free_count;

	mutex_enter(&EMLXS_MEMGET_LOCK);

	/* Check if memory pool is GET enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* If no entries on memget list, then check memput list */
	if (!seg->fc_memget_ptr) {
		mutex_enter(&EMLXS_MEMPUT_LOCK);
		if (seg->fc_memput_ptr) {
			/*
			 * Move list from memput to memget
			 */
			seg->fc_memget_ptr = seg->fc_memput_ptr;
			seg->fc_memget_end = seg->fc_memput_end;
			seg->fc_memget_cnt = seg->fc_memput_cnt;
			seg->fc_memput_ptr = NULL;
			seg->fc_memput_end = NULL;
			seg->fc_memput_cnt = 0;
		}
		mutex_exit(&EMLXS_MEMPUT_LOCK);
	}

	/* If no entries on memget list, then pool is empty */
	/* Try to allocate more if pool is dynamic */
	if (!seg->fc_memget_ptr &&
	    (seg->fc_memflag & FC_MEMSEG_DYNAMIC)) {
		(void) emlxs_mem_pool_alloc(hba, seg, seg->fc_step);
		seg->fc_low = 0;
	}

	/* If no entries on memget list, then pool is empty */
	if (!seg->fc_memget_ptr) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_alloc_failed_msg,
		    "%s empty.", seg->fc_label);

		mutex_exit(&EMLXS_MEMGET_LOCK);
		return (NULL);
	}

	/* Remove an entry from the get list */
	bp = seg->fc_memget_ptr;

	if (seg->fc_memget_end == bp) {
		/* Taking the last entry empties the list */
		seg->fc_memget_ptr = NULL;
		seg->fc_memget_end = NULL;
		seg->fc_memget_cnt = 0;

	} else {
		/* First pointer-sized bytes of each entry link to the next */
		seg->fc_memget_ptr = *((uint8_t **)bp);
		seg->fc_memget_cnt--;
	}

	/* Initialize buffer */
	if (!(seg->fc_memflag & FC_MBUF_DMA)) {
		bzero(bp, seg->fc_memsize);
	} else {
		/* DMA objects keep their MATCHMAP header; reset state only */
		mp = (MATCHMAP *)bp;
		mp->fc_mptr = NULL;
		mp->flag |= MAP_POOL_ALLOCATED;
	}

	/* Set fc_low if pool is dynamic */
	if (seg->fc_memflag & FC_MEMSEG_DYNAMIC) {
		free_count = (seg->fc_memget_cnt + seg->fc_memput_cnt);
		if (free_count < seg->fc_low) {
			seg->fc_low = free_count;
		}
	}

	mutex_exit(&EMLXS_MEMGET_LOCK);

	return (bp);

} /* emlxs_mem_pool_get() */
940 977
941 978
/*
 * emlxs_mem_pool_put
 *
 * Returns object <bp> to segment <seg> by appending it to the tail of
 * the memput freelist.  Detects double frees and (for DMA pools)
 * objects that do not belong to this segment; the latter triggers an
 * adapter shutdown.  If the pool has already been destroyed (GET
 * disabled, PUT still enabled), a late put retries the destroy.
 */
extern void
emlxs_mem_pool_put(emlxs_hba_t *hba, MEMSEG *seg, void *bp)
{
	emlxs_port_t *port = &PPORT;
	MATCHMAP *mp;

	/* Free the pool object */
	mutex_enter(&EMLXS_MEMPUT_LOCK);

	/* Check if memory pool is PUT enabled */
	if (!(seg->fc_memflag & FC_MEMSEG_PUT_ENABLED)) {
		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Check if buffer was just freed */
	/* (a freed buffer sits at the tail of one of the freelists) */
	if ((seg->fc_memput_end == bp) || (seg->fc_memget_end == bp)) {
		EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
		    "%s: Freeing free object: bp=%p", seg->fc_label, bp);

		mutex_exit(&EMLXS_MEMPUT_LOCK);
		return;
	}

	/* Validate DMA buffer */
	if (seg->fc_memflag & FC_MBUF_DMA) {
		mp = (MATCHMAP *)bp;

		if (!(mp->flag & MAP_POOL_ALLOCATED) ||
		    (mp->segment != seg)) {
			EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
			    "mem_pool_put: %s invalid: mp=%p " \
			    "tag=0x%x flag=%x", seg->fc_label,
			    mp, mp->tag, mp->flag);

			/* Corrupt object: take the adapter down */
			EMLXS_STATE_CHANGE(hba, FC_ERROR);

			mutex_exit(&EMLXS_MEMPUT_LOCK);

			emlxs_thread_spawn(hba, emlxs_shutdown_thread,
			    NULL, NULL);

			return;
		}
	}

	/* Release buffer to the end of the memput list */
	if (seg->fc_memput_end == NULL) {
		seg->fc_memput_ptr = bp;
		seg->fc_memput_cnt = 1;
	} else {
		*((void **)(seg->fc_memput_end)) = bp;
		seg->fc_memput_cnt++;
	}
	seg->fc_memput_end = bp;
	/* Terminate the freelist link stored in the object itself */
	*((void **)(bp)) = NULL;

	mutex_exit(&EMLXS_MEMPUT_LOCK);

	/* This is for late PUT's after an initial */
	/* emlxs_mem_pool_destroy call */
	if ((seg->fc_memflag & FC_MEMSEG_PUT_ENABLED) &&
	    !(seg->fc_memflag & FC_MEMSEG_GET_ENABLED)) {
		emlxs_mem_pool_destroy(hba, seg);
	}

	return;

} /* emlxs_mem_pool_put() */
1011 1048
1012 1049
1013 1050 extern MATCHMAP *
1014 1051 emlxs_mem_buf_alloc(emlxs_hba_t *hba, uint32_t size)
1015 1052 {
1016 1053 emlxs_port_t *port = &PPORT;
1017 1054 uint8_t *bp = NULL;
1018 1055 MATCHMAP *mp = NULL;
1019 1056 MBUF_INFO *buf_info;
1020 1057 MBUF_INFO bufinfo;
1021 1058
1022 1059 buf_info = &bufinfo;
1023 1060
1024 1061 bzero(buf_info, sizeof (MBUF_INFO));
1025 1062 buf_info->size = sizeof (MATCHMAP);
1026 1063 buf_info->align = sizeof (void *);
1027 1064
1028 1065 (void) emlxs_mem_alloc(hba, buf_info);
1029 1066 if (buf_info->virt == NULL) {
1030 1067 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1031 1068 "MEM_BUF_ALLOC buffer.");
1032 1069
1033 1070 return (NULL);
1034 1071 }
1035 1072
1036 1073 mp = (MATCHMAP *)buf_info->virt;
1037 1074 bzero(mp, sizeof (MATCHMAP));
1038 1075
1039 1076 bzero(buf_info, sizeof (MBUF_INFO));
1040 1077 buf_info->size = size;
1041 1078 buf_info->flags = FC_MBUF_DMA | FC_MBUF_SNGLSG | FC_MBUF_DMA32;
1042 1079 buf_info->align = 32;
1043 1080
1044 1081 (void) emlxs_mem_alloc(hba, buf_info);
1045 1082 if (buf_info->virt == NULL) {
|
↓ open down ↓ |
324 lines elided |
↑ open up ↑ |
1046 1083
1047 1084 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_failed_msg,
1048 1085 "MEM_BUF_ALLOC DMA buffer.");
1049 1086
1050 1087 /* Free the mp object */
1051 1088 bzero(buf_info, sizeof (MBUF_INFO));
1052 1089 buf_info->size = sizeof (MATCHMAP);
1053 1090 buf_info->virt = (void *)mp;
1054 1091 emlxs_mem_free(hba, buf_info);
1055 1092
1056 - return (0);
1093 + return (NULL);
1057 1094 }
1058 1095 bp = (uint8_t *)buf_info->virt;
1059 1096 bzero(bp, buf_info->size);
1060 1097
1061 1098 mp->virt = buf_info->virt;
1062 1099 mp->phys = buf_info->phys;
1063 1100 mp->size = buf_info->size;
1064 1101 mp->dma_handle = buf_info->dma_handle;
1065 1102 mp->data_handle = buf_info->data_handle;
1066 1103 mp->tag = MEM_BUF;
1067 1104 mp->flag |= MAP_BUF_ALLOCATED;
1068 1105
1069 1106 return (mp);
1070 1107
1071 1108 } /* emlxs_mem_buf_alloc() */
1072 1109
1073 1110
1074 1111 extern void
1075 1112 emlxs_mem_buf_free(emlxs_hba_t *hba, MATCHMAP *mp)
1076 1113 {
1077 1114 MBUF_INFO bufinfo;
1078 1115 MBUF_INFO *buf_info;
1079 1116
1080 1117 buf_info = &bufinfo;
1081 1118
1082 1119 if (!(mp->flag & MAP_BUF_ALLOCATED)) {
1083 1120 return;
1084 1121 }
1085 1122
1086 1123 bzero(buf_info, sizeof (MBUF_INFO));
1087 1124 buf_info->size = mp->size;
1088 1125 buf_info->virt = mp->virt;
1089 1126 buf_info->phys = mp->phys;
1090 1127 buf_info->dma_handle = mp->dma_handle;
1091 1128 buf_info->data_handle = mp->data_handle;
1092 1129 buf_info->flags = FC_MBUF_DMA;
1093 1130 emlxs_mem_free(hba, buf_info);
1094 1131
1095 1132 bzero(buf_info, sizeof (MBUF_INFO));
1096 1133 buf_info->size = sizeof (MATCHMAP);
1097 1134 buf_info->virt = (void *)mp;
1098 1135 emlxs_mem_free(hba, buf_info);
1099 1136
1100 1137 return;
1101 1138
1102 1139 } /* emlxs_mem_buf_free() */
1103 1140
1104 1141
1105 1142 extern void *
1106 1143 emlxs_mem_get(emlxs_hba_t *hba, uint32_t seg_id)
1107 1144 {
1108 1145 emlxs_port_t *port = &PPORT;
1109 1146 void *bp;
1110 1147 MAILBOXQ *mbq;
1111 1148 IOCBQ *iocbq;
1112 1149 NODELIST *node;
1113 1150 MEMSEG *seg;
1114 1151
1115 1152 if (seg_id >= FC_MAX_SEG) {
1116 1153
1117 1154 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1118 1155 "mem_get: Invalid segment id = %d",
1119 1156 seg_id);
1120 1157
1121 1158 return (NULL);
1122 1159 }
1123 1160 seg = &hba->memseg[seg_id];
1124 1161
1125 1162 /* Alloc a buffer from the pool */
1126 1163 bp = emlxs_mem_pool_get(hba, seg);
1127 1164
1128 1165 if (bp) {
1129 1166 switch (seg_id) {
1130 1167 case MEM_MBOX:
1131 1168 mbq = (MAILBOXQ *)bp;
1132 1169 mbq->flag |= MBQ_POOL_ALLOCATED;
1133 1170 break;
1134 1171
1135 1172 case MEM_IOCB:
1136 1173 iocbq = (IOCBQ *)bp;
1137 1174 iocbq->flag |= IOCB_POOL_ALLOCATED;
1138 1175 break;
1139 1176
1140 1177 case MEM_NLP:
1141 1178 node = (NODELIST *)bp;
1142 1179 node->flag |= NODE_POOL_ALLOCATED;
1143 1180 break;
1144 1181 }
1145 1182 }
1146 1183
1147 1184 return (bp);
1148 1185
1149 1186 } /* emlxs_mem_get() */
1150 1187
1151 1188
1152 1189 extern void
1153 1190 emlxs_mem_put(emlxs_hba_t *hba, uint32_t seg_id, void *bp)
1154 1191 {
1155 1192 emlxs_port_t *port = &PPORT;
1156 1193 MAILBOXQ *mbq;
1157 1194 IOCBQ *iocbq;
1158 1195 NODELIST *node;
1159 1196 MEMSEG *seg;
1160 1197 MATCHMAP *mp;
1161 1198
1162 1199 if (seg_id >= FC_MAX_SEG) {
1163 1200
1164 1201 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1165 1202 "mem_put: Invalid segment id = %d: bp=%p",
1166 1203 seg_id, bp);
1167 1204
1168 1205 return;
1169 1206 }
1170 1207 seg = &hba->memseg[seg_id];
1171 1208
1172 1209 /* Verify buffer */
1173 1210 switch (seg_id) {
1174 1211 case MEM_MBOX:
1175 1212 mbq = (MAILBOXQ *)bp;
1176 1213
1177 1214 if (!(mbq->flag & MBQ_POOL_ALLOCATED)) {
1178 1215 return;
1179 1216 }
1180 1217 break;
1181 1218
1182 1219 case MEM_IOCB:
1183 1220 iocbq = (IOCBQ *)bp;
1184 1221
1185 1222 if (!(iocbq->flag & IOCB_POOL_ALLOCATED)) {
1186 1223 return;
1187 1224 }
1188 1225
1189 1226 /* Any IOCBQ with a packet attached did not come */
1190 1227 /* from our pool */
1191 1228 if (iocbq->sbp) {
1192 1229 return;
1193 1230 }
1194 1231 break;
1195 1232
1196 1233 case MEM_NLP:
1197 1234 node = (NODELIST *)bp;
1198 1235
1199 1236 if (!(node->flag & NODE_POOL_ALLOCATED)) {
1200 1237 return;
1201 1238 }
1202 1239 break;
1203 1240
1204 1241 default:
1205 1242 mp = (MATCHMAP *)bp;
1206 1243
1207 1244 if (mp->flag & MAP_BUF_ALLOCATED) {
1208 1245 emlxs_mem_buf_free(hba, mp);
1209 1246 return;
1210 1247 }
1211 1248
1212 1249 if (mp->flag & MAP_TABLE_ALLOCATED) {
1213 1250 return;
1214 1251 }
1215 1252
1216 1253 if (!(mp->flag & MAP_POOL_ALLOCATED)) {
1217 1254 return;
1218 1255 }
1219 1256 break;
1220 1257 }
1221 1258
1222 1259 /* Free a buffer to the pool */
1223 1260 emlxs_mem_pool_put(hba, seg, bp);
1224 1261
1225 1262 return;
1226 1263
1227 1264 } /* emlxs_mem_put() */
1228 1265
1229 1266
1230 1267 /*
1231 1268 * Look up the virtual address given a mapped address
1232 1269 */
1233 1270 /* SLI3 */
1234 1271 extern MATCHMAP *
1235 1272 emlxs_mem_get_vaddr(emlxs_hba_t *hba, RING *rp, uint64_t mapbp)
1236 1273 {
1237 1274 emlxs_port_t *port = &PPORT;
1238 1275 MATCHMAP *prev;
1239 1276 MATCHMAP *mp;
1240 1277
1241 1278 if (rp->ringno == hba->channel_els) {
1242 1279 mp = (MATCHMAP *)rp->fc_mpoff;
1243 1280 prev = 0;
1244 1281
1245 1282 while (mp) {
1246 1283 if (mp->phys == mapbp) {
1247 1284 if (prev == 0) {
1248 1285 rp->fc_mpoff = mp->fc_mptr;
1249 1286 } else {
1250 1287 prev->fc_mptr = mp->fc_mptr;
1251 1288 }
1252 1289
1253 1290 if (rp->fc_mpon == mp) {
1254 1291 rp->fc_mpon = (void *)prev;
1255 1292 }
1256 1293
1257 1294 mp->fc_mptr = NULL;
1258 1295
1259 1296 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1260 1297 DDI_DMA_SYNC_FORKERNEL);
1261 1298
1262 1299 HBASTATS.ElsUbPosted--;
1263 1300
1264 1301 return (mp);
1265 1302 }
1266 1303
1267 1304 prev = mp;
1268 1305 mp = (MATCHMAP *)mp->fc_mptr;
1269 1306 }
1270 1307
1271 1308 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1272 1309 "ELS Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1273 1310 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1274 1311
1275 1312 } else if (rp->ringno == hba->channel_ct) {
1276 1313
1277 1314 mp = (MATCHMAP *)rp->fc_mpoff;
1278 1315 prev = 0;
1279 1316
1280 1317 while (mp) {
1281 1318 if (mp->phys == mapbp) {
1282 1319 if (prev == 0) {
1283 1320 rp->fc_mpoff = mp->fc_mptr;
1284 1321 } else {
1285 1322 prev->fc_mptr = mp->fc_mptr;
1286 1323 }
1287 1324
1288 1325 if (rp->fc_mpon == mp) {
1289 1326 rp->fc_mpon = (void *)prev;
1290 1327 }
1291 1328
1292 1329 mp->fc_mptr = NULL;
1293 1330
1294 1331 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1295 1332 DDI_DMA_SYNC_FORKERNEL);
1296 1333
1297 1334 HBASTATS.CtUbPosted--;
1298 1335
1299 1336 return (mp);
1300 1337 }
1301 1338
1302 1339 prev = mp;
1303 1340 mp = (MATCHMAP *)mp->fc_mptr;
1304 1341 }
1305 1342
1306 1343 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1307 1344 "CT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1308 1345 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1309 1346
1310 1347 } else if (rp->ringno == hba->channel_ip) {
1311 1348
1312 1349 mp = (MATCHMAP *)rp->fc_mpoff;
1313 1350 prev = 0;
1314 1351
1315 1352 while (mp) {
1316 1353 if (mp->phys == mapbp) {
1317 1354 if (prev == 0) {
1318 1355 rp->fc_mpoff = mp->fc_mptr;
1319 1356 } else {
1320 1357 prev->fc_mptr = mp->fc_mptr;
1321 1358 }
1322 1359
1323 1360 if (rp->fc_mpon == mp) {
1324 1361 rp->fc_mpon = (void *)prev;
1325 1362 }
1326 1363
1327 1364 mp->fc_mptr = NULL;
1328 1365
1329 1366 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1330 1367 DDI_DMA_SYNC_FORKERNEL);
1331 1368
1332 1369 HBASTATS.IpUbPosted--;
1333 1370
1334 1371 return (mp);
1335 1372 }
1336 1373
1337 1374 prev = mp;
1338 1375 mp = (MATCHMAP *)mp->fc_mptr;
1339 1376 }
1340 1377
1341 1378 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1342 1379 "IP Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1343 1380 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1344 1381
1345 1382 #ifdef SFCT_SUPPORT
1346 1383 } else if (rp->ringno == hba->CHANNEL_FCT) {
1347 1384 mp = (MATCHMAP *)rp->fc_mpoff;
1348 1385 prev = 0;
1349 1386
1350 1387 while (mp) {
1351 1388 if (mp->phys == mapbp) {
1352 1389 if (prev == 0) {
1353 1390 rp->fc_mpoff = mp->fc_mptr;
1354 1391 } else {
1355 1392 prev->fc_mptr = mp->fc_mptr;
1356 1393 }
1357 1394
1358 1395 if (rp->fc_mpon == mp) {
1359 1396 rp->fc_mpon = (void *)prev;
1360 1397 }
1361 1398
1362 1399 mp->fc_mptr = NULL;
1363 1400
1364 1401 EMLXS_MPDATA_SYNC(mp->dma_handle, 0, mp->size,
1365 1402 DDI_DMA_SYNC_FORKERNEL);
1366 1403
1367 1404 HBASTATS.FctUbPosted--;
1368 1405
1369 1406 return (mp);
1370 1407 }
1371 1408
1372 1409 prev = mp;
1373 1410 mp = (MATCHMAP *)mp->fc_mptr;
1374 1411 }
1375 1412
1376 1413 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_pool_error_msg,
1377 1414 "FCT Buffer not mapped: bp=%lx ringno=%x mpoff=%p mpon=%p",
1378 1415 mapbp, rp->ringno, rp->fc_mpoff, rp->fc_mpon);
1379 1416
1380 1417 #endif /* SFCT_SUPPORT */
1381 1418 }
1382 1419
1383 1420 return (0);
1384 1421
1385 1422 } /* emlxs_mem_get_vaddr() */
1386 1423
1387 1424
1388 1425 /*
1389 1426 * Given a virtual address bp, generate the physical mapped address and
1390 1427 * place it where addr points to. Save the address pair for lookup later.
1391 1428 */
1392 1429 /* SLI3 */
1393 1430 extern void
1394 1431 emlxs_mem_map_vaddr(emlxs_hba_t *hba, RING *rp, MATCHMAP *mp,
1395 1432 uint32_t *haddr, uint32_t *laddr)
1396 1433 {
1397 1434 if (rp->ringno == hba->channel_els) {
1398 1435 /*
1399 1436 * Update slot fc_mpon points to then bump it
1400 1437 * fc_mpoff is pointer head of the list.
1401 1438 * fc_mpon is pointer tail of the list.
1402 1439 */
1403 1440 mp->fc_mptr = NULL;
1404 1441 if (rp->fc_mpoff == 0) {
1405 1442 rp->fc_mpoff = (void *)mp;
1406 1443 rp->fc_mpon = (void *)mp;
1407 1444 } else {
1408 1445 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1409 1446 (void *)mp;
1410 1447 rp->fc_mpon = (void *)mp;
1411 1448 }
1412 1449
1413 1450 if (hba->flag & FC_SLIM2_MODE) {
1414 1451
1415 1452 /* return mapped address */
1416 1453 *haddr = PADDR_HI(mp->phys);
1417 1454 /* return mapped address */
1418 1455 *laddr = PADDR_LO(mp->phys);
1419 1456 } else {
1420 1457 /* return mapped address */
1421 1458 *laddr = PADDR_LO(mp->phys);
1422 1459 }
1423 1460
1424 1461 HBASTATS.ElsUbPosted++;
1425 1462
1426 1463 } else if (rp->ringno == hba->channel_ct) {
1427 1464 /*
1428 1465 * Update slot fc_mpon points to then bump it
1429 1466 * fc_mpoff is pointer head of the list.
1430 1467 * fc_mpon is pointer tail of the list.
1431 1468 */
1432 1469 mp->fc_mptr = NULL;
1433 1470 if (rp->fc_mpoff == 0) {
1434 1471 rp->fc_mpoff = (void *)mp;
1435 1472 rp->fc_mpon = (void *)mp;
1436 1473 } else {
1437 1474 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1438 1475 (void *)mp;
1439 1476 rp->fc_mpon = (void *)mp;
1440 1477 }
1441 1478
1442 1479 if (hba->flag & FC_SLIM2_MODE) {
1443 1480 /* return mapped address */
1444 1481 *haddr = PADDR_HI(mp->phys);
1445 1482 /* return mapped address */
1446 1483 *laddr = PADDR_LO(mp->phys);
1447 1484 } else {
1448 1485 /* return mapped address */
1449 1486 *laddr = PADDR_LO(mp->phys);
1450 1487 }
1451 1488
1452 1489 HBASTATS.CtUbPosted++;
1453 1490
1454 1491
1455 1492 } else if (rp->ringno == hba->channel_ip) {
1456 1493 /*
1457 1494 * Update slot fc_mpon points to then bump it
1458 1495 * fc_mpoff is pointer head of the list.
1459 1496 * fc_mpon is pointer tail of the list.
1460 1497 */
1461 1498 mp->fc_mptr = NULL;
1462 1499 if (rp->fc_mpoff == 0) {
1463 1500 rp->fc_mpoff = (void *)mp;
1464 1501 rp->fc_mpon = (void *)mp;
1465 1502 } else {
1466 1503 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1467 1504 (void *)mp;
1468 1505 rp->fc_mpon = (void *)mp;
1469 1506 }
1470 1507
1471 1508 if (hba->flag & FC_SLIM2_MODE) {
1472 1509 /* return mapped address */
1473 1510 *haddr = PADDR_HI(mp->phys);
1474 1511 *laddr = PADDR_LO(mp->phys);
1475 1512 } else {
1476 1513 *laddr = PADDR_LO(mp->phys);
1477 1514 }
1478 1515
1479 1516 HBASTATS.IpUbPosted++;
1480 1517
1481 1518
1482 1519 #ifdef SFCT_SUPPORT
1483 1520 } else if (rp->ringno == hba->CHANNEL_FCT) {
1484 1521 /*
1485 1522 * Update slot fc_mpon points to then bump it
1486 1523 * fc_mpoff is pointer head of the list.
1487 1524 * fc_mpon is pointer tail of the list.
1488 1525 */
1489 1526 mp->fc_mptr = NULL;
1490 1527 if (rp->fc_mpoff == 0) {
1491 1528 rp->fc_mpoff = (void *)mp;
1492 1529 rp->fc_mpon = (void *)mp;
1493 1530 } else {
1494 1531 ((MATCHMAP *)(rp->fc_mpon))->fc_mptr =
1495 1532 (void *)mp;
1496 1533 rp->fc_mpon = (void *)mp;
1497 1534 }
1498 1535
1499 1536 if (hba->flag & FC_SLIM2_MODE) {
1500 1537 /* return mapped address */
1501 1538 *haddr = PADDR_HI(mp->phys);
1502 1539 /* return mapped address */
1503 1540 *laddr = PADDR_LO(mp->phys);
1504 1541 } else {
1505 1542 /* return mapped address */
1506 1543 *laddr = PADDR_LO(mp->phys);
1507 1544 }
1508 1545
1509 1546 HBASTATS.FctUbPosted++;
1510 1547
1511 1548 #endif /* SFCT_SUPPORT */
1512 1549 }
1513 1550 } /* emlxs_mem_map_vaddr() */
1514 1551
1515 1552
1516 1553 /* SLI3 */
1517 1554 uint32_t
1518 1555 emlxs_hbq_alloc(emlxs_hba_t *hba, uint32_t hbq_id)
1519 1556 {
1520 1557 emlxs_port_t *port = &PPORT;
1521 1558 HBQ_INIT_t *hbq;
1522 1559 MBUF_INFO *buf_info;
1523 1560 MBUF_INFO bufinfo;
1524 1561
1525 1562 hbq = &hba->sli.sli3.hbq_table[hbq_id];
1526 1563
1527 1564 if (hbq->HBQ_host_buf.virt == 0) {
1528 1565 buf_info = &bufinfo;
1529 1566
1530 1567 /* Get the system's page size in a DDI-compliant way. */
1531 1568 bzero(buf_info, sizeof (MBUF_INFO));
1532 1569 buf_info->size = hbq->HBQ_numEntries * sizeof (HBQE_t);
1533 1570 buf_info->flags = FC_MBUF_DMA;
1534 1571 buf_info->align = 4096;
1535 1572
1536 1573 (void) emlxs_mem_alloc(hba, buf_info);
1537 1574
1538 1575 if (buf_info->virt == NULL) {
1539 1576 EMLXS_MSGF(EMLXS_CONTEXT, &emlxs_mem_alloc_msg,
1540 1577 "Unable to alloc HBQ.");
1541 1578 return (ENOMEM);
1542 1579 }
1543 1580
1544 1581 hbq->HBQ_host_buf.virt = buf_info->virt;
1545 1582 hbq->HBQ_host_buf.phys = buf_info->phys;
1546 1583 hbq->HBQ_host_buf.data_handle = buf_info->data_handle;
1547 1584 hbq->HBQ_host_buf.dma_handle = buf_info->dma_handle;
1548 1585 hbq->HBQ_host_buf.size = buf_info->size;
1549 1586 hbq->HBQ_host_buf.tag = hbq_id;
1550 1587
1551 1588 bzero((char *)hbq->HBQ_host_buf.virt, buf_info->size);
1552 1589 }
1553 1590
1554 1591 return (0);
1555 1592
1556 1593 } /* emlxs_hbq_alloc() */
|
↓ open down ↓ |
490 lines elided |
↑ open up ↑ |
XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX