1 /*
2 * CDDL HEADER START
3 *
4 * The contents of this file are subject to the terms of the
5 * Common Development and Distribution License (the "License").
6 * You may not use this file except in compliance with the License.
7 *
8 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
9 * or http://www.opensolaris.org/os/licensing.
10 * See the License for the specific language governing permissions
11 * and limitations under the License.
12 *
13 * When distributing Covered Code, include this CDDL HEADER in each
14 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
15 * If applicable, add the following below this CDDL HEADER, with the
16 * fields enclosed by brackets "[]" replaced with your own identifying
17 * information: Portions Copyright [yyyy] [name of copyright owner]
18 *
19 * CDDL HEADER END
20 */
21
22 /* Copyright 2015 QLogic Corporation */
23
24 /*
25 * Copyright (c) 2008, 2010, Oracle and/or its affiliates. All rights reserved.
26 */
27
28 #pragma ident "Copyright 2015 QLogic Corporation; ql_iocb.c"
29
30 /*
31 * ISP2xxx Solaris Fibre Channel Adapter (FCA) driver source file.
32 *
33 * ***********************************************************************
34 * * **
35 * * NOTICE **
36 * * COPYRIGHT (C) 1996-2015 QLOGIC CORPORATION **
37 * * ALL RIGHTS RESERVED **
38 * * **
39 * ***********************************************************************
40 *
41 */
42
43 #include <ql_apps.h>
44 #include <ql_api.h>
45 #include <ql_debug.h>
46 #include <ql_iocb.h>
47 #include <ql_isr.h>
48 #include <ql_nx.h>
49 #include <ql_xioctl.h>
50 #include <ql_fm.h>
51
52
53 /*
54 * Local Function Prototypes.
55 */
56 static int ql_req_pkt(ql_adapter_state_t *, ql_request_q_t *, request_t **);
57 static void ql_isp_cmd(ql_adapter_state_t *, ql_request_q_t *);
58 static void ql_continuation_iocb(ql_adapter_state_t *, ql_request_q_t *,
59 ddi_dma_cookie_t *, uint16_t, boolean_t);
60 static void ql_isp24xx_rcvbuf(ql_adapter_state_t *);
61 static void ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *, ql_request_q_t *,
62 ql_srb_t *, void *);
63
64 /*
65 * ql_start_iocb
66 * The start IOCB is responsible for building request packets
67 * on request ring and modifying ISP input pointer.
68 *
69 * Input:
70 * ha: adapter state pointer.
71 * sp: srb structure pointer.
72 *
73 * Context:
74 * Interrupt or Kernel context, no mailbox commands allowed.
75 */
void
ql_start_iocb(ql_adapter_state_t *vha, ql_srb_t *sp)
{
	ql_link_t *link;
	ql_request_q_t *req_q;
	request_t *pkt;
	uint64_t *ptr64;
	uint32_t cnt;
	ql_adapter_state_t *ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/* Acquire ring lock. */
	REQUEST_RING_LOCK(ha);

	if (sp != NULL) {
		/*
		 * If the pending queue is not empty maintain order
		 * by putting this srb at the tail and getting the head.
		 */
		if ((link = ha->pending_cmds.first) != NULL) {
			ql_add_link_b(&ha->pending_cmds, &sp->cmd);
			/* Remove command from pending command queue */
			sp = link->base_address;
			ql_remove_link(&ha->pending_cmds, &sp->cmd);
		}
	} else {
		/* Get command from pending command queue if not empty. */
		if ((link = ha->pending_cmds.first) == NULL) {
			/* Release ring specific lock */
			REQUEST_RING_UNLOCK(ha);
			QL_PRINT_3(ha, "empty done\n");
			return;
		}
		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* start this request and as many others as possible */
	for (;;) {
		/*
		 * Use the second request queue when it exists and the
		 * srb is bound to a non-default response queue;
		 * otherwise everything goes through queue 0.
		 */
		if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
			req_q = ha->req_q[1];
		} else {
			req_q = ha->req_q[0];
		}

		if (req_q->req_q_cnt < sp->req_cnt) {
			/*
			 * Calculate number of free request entries by
			 * re-reading the firmware's out pointer (shadow
			 * memory, MBAR register, or I/O register,
			 * depending on adapter capabilities).
			 */
			if (ha->flags & QUEUE_SHADOW_PTRS) {
				(void) ddi_dma_sync(req_q->req_ring.dma_handle,
				    (off_t)req_q->req_out_shadow_ofst,
				    SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
				cnt = ddi_get32(req_q->req_ring.acc_handle,
				    req_q->req_out_shadow_ptr);
			} else if (ha->flags & MULTI_QUEUE) {
				cnt = RD16_MBAR_REG(ha, req_q->mbar_req_out);
			} else {
				cnt = RD16_IO_REG(ha, req_out);
			}
			if (req_q->req_ring_index < cnt) {
				req_q->req_q_cnt = (uint16_t)
				    (cnt - req_q->req_ring_index);
			} else {
				req_q->req_q_cnt =
				    (uint16_t)(req_q->req_entry_cnt -
				    (req_q->req_ring_index - cnt));
			}
			/*
			 * Keep one entry in reserve — presumably so a
			 * completely full ring is distinguishable from an
			 * empty one (in == out).
			 */
			if (req_q->req_q_cnt != 0) {
				req_q->req_q_cnt--;
			}

			/*
			 * If no room in request ring put this srb at
			 * the head of the pending queue and exit.
			 */
			if (req_q->req_q_cnt < sp->req_cnt) {
				QL_PRINT_8(ha, "request ring full,"
				    " req_q_cnt=%d, req_ring_index=%d\n",
				    req_q->req_q_cnt, req_q->req_ring_index);
				ql_add_link_t(&ha->pending_cmds, &sp->cmd);
				break;
			}
		}

		/*
		 * Check for room in outstanding command list.
		 * Index 0 is never used; the scan wraps within
		 * [1, osc_max_cnt).
		 */
		for (cnt = 1; cnt < ha->osc_max_cnt; cnt++) {
			ha->osc_index++;
			if (ha->osc_index == ha->osc_max_cnt) {
				ha->osc_index = 1;
			}
			if (ha->outstanding_cmds[ha->osc_index] == NULL) {
				break;
			}
		}
		/*
		 * If no room in outstanding array put this srb at
		 * the head of the pending queue and exit.
		 */
		if (cnt == ha->osc_max_cnt) {
			QL_PRINT_8(ha, "no room in outstanding array\n");
			ql_add_link_t(&ha->pending_cmds, &sp->cmd);
			break;
		}

		/* nothing to stop us now. */
		ha->outstanding_cmds[ha->osc_index] = sp;
		/*
		 * create and save a unique response identifier in the srb:
		 * low bits are the outstanding-array slot, upper bits a
		 * running command count so stale handles can be detected.
		 */
		sp->handle = ha->adapter_stats->ncmds << OSC_INDEX_SHIFT |
		    ha->osc_index;
		req_q->req_q_cnt = (uint16_t)(req_q->req_q_cnt - sp->req_cnt);

		/* build the iocb in the request ring */
		pkt = req_q->req_ring_ptr;
		sp->request_ring_ptr = pkt;
		sp->req_q_number = req_q->req_q_number;
		sp->flags |= SRB_IN_TOKEN_ARRAY;

		/* Zero out packet (64 bytes = one request entry). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/* Setup IOCB common data. */
		pkt->entry_count = (uint8_t)sp->req_cnt;
		if (ha->req_q[1] != NULL && sp->rsp_q_number != 0) {
			/* Tell firmware which response queue to reply on. */
			pkt->entry_status = sp->rsp_q_number;
		}
		pkt->sys_define = (uint8_t)req_q->req_ring_index;

		/* mark the iocb with the response identifier */
		ddi_put32(req_q->req_ring.acc_handle, &pkt->handle,
		    (uint32_t)sp->handle);

		/* Setup IOCB unique data via the srb's iocb-build callback. */
		(sp->iocb)(vha, req_q, sp, pkt);

		sp->flags |= SRB_ISP_STARTED;

		QL_PRINT_5(ha, "req packet, sp=%p\n", (void *)sp);
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);

		/* Sync DMA buffer before handing the entry to hardware. */
		(void) ddi_dma_sync(req_q->req_ring.dma_handle,
		    (off_t)(req_q->req_ring_index * REQUEST_ENTRY_SIZE),
		    (size_t)REQUEST_ENTRY_SIZE, DDI_DMA_SYNC_FORDEV);

		/*
		 * Adjust ring index.
		 * NOTE(review): the wrap test uses the REQUEST_ENTRY_CNT
		 * macro while the free-entry math above uses
		 * req_q->req_entry_cnt — confirm these are always equal,
		 * including for multi-queue configurations.
		 */
		req_q->req_ring_index++;
		if (req_q->req_ring_index == REQUEST_ENTRY_CNT) {
			req_q->req_ring_index = 0;
			req_q->req_ring_ptr = req_q->req_ring.bp;
		} else {
			req_q->req_ring_ptr++;
		}

		/* Reset watchdog timer */
		sp->wdg_q_time = sp->init_wdg_q_time;

		/*
		 * Send it by setting the new ring index in the ISP Request
		 * Ring In Pointer register. This is the mechanism
		 * used to notify the isp that a new iocb has been
		 * placed on the request ring.
		 */
		if (ha->flags & MULTI_QUEUE) {
			WR16_MBAR_REG(ha, req_q->mbar_req_in,
			    req_q->req_ring_index);
		} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
			ql_8021_wr_req_in(ha, req_q->req_ring_index);
		} else {
			WRT16_IO_REG(ha, req_in, req_q->req_ring_index);
		}

		/* Update outstanding command count statistic. */
		ha->adapter_stats->ncmds++;

		/* if there is a pending command, try to start it. */
		if ((link = ha->pending_cmds.first) == NULL) {
			break;
		}

		/* Remove command from pending command queue */
		sp = link->base_address;
		ql_remove_link(&ha->pending_cmds, &sp->cmd);
	}

	/* FMA: verify register accesses made above completed cleanly. */
	if (qlc_fm_check_acc_handle(ha, ha->dev_handle)
	    != DDI_FM_OK) {
		qlc_fm_report_err_impact(ha,
		    QL_FM_EREPORT_ACC_HANDLE_CHECK);
	}

	/* Release ring specific lock */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
276
277 /*
278 * ql_req_pkt
279 * Function is responsible for locking ring and
280 * getting a zeroed out request packet.
281 *
282 * Input:
283 * ha: adapter state pointer.
284 * req_q: request queue structure pointer.
285 * pkt: address for packet pointer.
286 *
287 * Returns:
288 * ql local function return status code.
289 *
290 * Context:
291 * Interrupt or Kernel context, no mailbox commands allowed.
292 */
static int
ql_req_pkt(ql_adapter_state_t *vha, ql_request_q_t *req_q, request_t **pktp)
{
	uint16_t cnt;
	uint64_t *ptr64;
	uint32_t timer;
	int rval = QL_FUNCTION_TIMEOUT;
	ql_adapter_state_t *ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Wait for 30 seconds for slot (30000 x 1ms polls).
	 * On QL_SUCCESS the REQUEST_RING_LOCK is still held; the caller
	 * must release it via ql_isp_cmd().
	 */
	for (timer = 30000; timer != 0; timer--) {
		/* Acquire ring lock. */
		REQUEST_RING_LOCK(ha);

		if (req_q->req_q_cnt == 0) {
			/*
			 * Calculate number of free request entries from
			 * the firmware's out pointer (shadow memory, MBAR
			 * register, or I/O register, by capability).
			 */
			if (ha->flags & QUEUE_SHADOW_PTRS) {
				(void) ddi_dma_sync(req_q->req_ring.dma_handle,
				    (off_t)req_q->req_out_shadow_ofst,
				    SHADOW_ENTRY_SIZE, DDI_DMA_SYNC_FORCPU);
				cnt = ddi_get32(req_q->req_ring.acc_handle,
				    req_q->req_out_shadow_ptr);
			} else if (ha->flags & MULTI_QUEUE) {
				cnt = RD16_MBAR_REG(ha, req_q->mbar_req_out);
			} else {
				cnt = RD16_IO_REG(ha, req_out);
			}
			/*
			 * NOTE(review): this math uses the REQUEST_ENTRY_CNT
			 * macro while ql_start_iocb() uses
			 * req_q->req_entry_cnt — confirm they are always
			 * equal for this queue.
			 */
			if (req_q->req_ring_index < cnt) {
				req_q->req_q_cnt = (uint16_t)
				    (cnt - req_q->req_ring_index);
			} else {
				req_q->req_q_cnt = (uint16_t)
				    (REQUEST_ENTRY_CNT -
				    (req_q->req_ring_index - cnt));
			}
			/* Keep one entry in reserve (full != empty). */
			if (req_q->req_q_cnt != 0) {
				req_q->req_q_cnt--;
			}
		}

		/* Found empty request ring slot? */
		if (req_q->req_q_cnt != 0) {
			req_q->req_q_cnt--;
			*pktp = req_q->req_ring_ptr;

			/* Zero out packet (one 64-byte request entry). */
			ptr64 = (uint64_t *)req_q->req_ring_ptr;
			*ptr64++ = 0; *ptr64++ = 0;
			*ptr64++ = 0; *ptr64++ = 0;
			*ptr64++ = 0; *ptr64++ = 0;
			*ptr64++ = 0; *ptr64 = 0;

			/* Setup IOCB common data. */
			req_q->req_ring_ptr->entry_count = 1;
			req_q->req_ring_ptr->sys_define =
			    (uint8_t)req_q->req_ring_index;
			/*
			 * Tag with the driver brand handle so completions
			 * for internally generated IOCBs are recognizable.
			 */
			ddi_put32(req_q->req_ring.acc_handle,
			    &req_q->req_ring_ptr->handle,
			    (uint32_t)QL_FCA_BRAND);

			rval = QL_SUCCESS;

			/* Exit with the ring lock still held. */
			break;
		}

		/* Release request queue lock. */
		REQUEST_RING_UNLOCK(ha);

		drv_usecwait(MILLISEC);

		/* Check for pending interrupts. */
		/*
		 * XXX protect interrupt routine from calling itself.
		 * Need to revisit this routine. So far we never
		 * hit this case as req slot was available
		 */
		if ((!(curthread->t_flag & T_INTR_THREAD)) &&
		    INTERRUPT_PENDING(ha)) {
			(void) ql_isr((caddr_t)ha);
			INTR_LOCK(ha);
			ha->intr_claimed = TRUE;
			INTR_UNLOCK(ha);
		}
	}

	if (rval != QL_SUCCESS) {
		/* No slot in 30s: request a full adapter reset. */
		ql_awaken_task_daemon(ha, NULL, ISP_ABORT_NEEDED, 0);
		EL(ha, "failed, rval = %xh, isp_abort_needed\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
389
390 /*
391 * ql_isp_cmd
392 * Function is responsible for modifying ISP input pointer.
393 * This action notifies the isp that a new request has been
394 * added to the request ring.
395 *
396 * Releases ring lock.
397 *
398 * Input:
399 * vha: adapter state pointer.
400 * req_q: request queue structure pointer.
401 *
402 * Context:
403 * Interrupt or Kernel context, no mailbox commands allowed.
404 */
static void
ql_isp_cmd(ql_adapter_state_t *vha, ql_request_q_t *req_q)
{
	ql_adapter_state_t *ha = vha->pha;

	QL_PRINT_3(ha, "started\n");

	QL_PRINT_5(ha, "req packet:\n");
	QL_DUMP_5((uint8_t *)req_q->req_ring_ptr, 8, REQUEST_ENTRY_SIZE);

	/*
	 * Sync DMA buffer so the entry built by the caller is visible to
	 * the device before the doorbell write below.
	 */
	(void) ddi_dma_sync(req_q->req_ring.dma_handle,
	    (off_t)(req_q->req_ring_index * REQUEST_ENTRY_SIZE),
	    (size_t)REQUEST_ENTRY_SIZE, DDI_DMA_SYNC_FORDEV);

	/* Adjust ring index, wrapping back to the ring base. */
	req_q->req_ring_index++;
	if (req_q->req_ring_index == REQUEST_ENTRY_CNT) {
		req_q->req_ring_index = 0;
		req_q->req_ring_ptr = req_q->req_ring.bp;
	} else {
		req_q->req_ring_ptr++;
	}

	/*
	 * Set chip new ring index (MBAR register, 8021 helper, or I/O
	 * register depending on adapter type).
	 */
	if (ha->flags & MULTI_QUEUE) {
		WR16_MBAR_REG(ha, req_q->mbar_req_in,
		    req_q->req_ring_index);
	} else if (CFG_IST(ha, CFG_CTRL_82XX)) {
		ql_8021_wr_req_in(ha, req_q->req_ring_index);
	} else {
		WRT16_IO_REG(ha, req_in, req_q->req_ring_index);
	}

	/* Release ring lock (acquired by ql_req_pkt on success). */
	REQUEST_RING_UNLOCK(ha);

	QL_PRINT_3(ha, "done\n");
}
444
445 /*
446 * ql_command_iocb
447 * Setup of command IOCB.
448 *
449 * Input:
450 * ha: adapter state pointer.
451 * req_q: request queue structure pointer.
452 * sp: srb structure pointer.
453 * arg: request queue packet.
454 *
455 * Context:
456 * Interrupt or Kernel context, no mailbox commands allowed.
457 */
void
ql_command_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
    void *arg)
{
	ddi_dma_cookie_t *cp;
	uint32_t *ptr32, cnt;
	uint16_t seg_cnt;
	fcp_cmd_t *fcp = sp->fcp;
	ql_tgt_t *tq = sp->lun_queue->target_queue;
	/* Same entry viewed as type 2 or type 3 layout; type set below. */
	cmd_entry_t *pkt = arg;
	cmd_3_entry_t *pkt3 = arg;

	QL_PRINT_3(ha, "started\n");

	/* Set LUN number */
	pkt->lun_l = LSB(sp->lun_queue->lun_no);
	pkt->lun_h = MSB(sp->lun_queue->lun_no);

	/* Set target ID (extended interface carries a 16-bit loop ID). */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->target_l = LSB(tq->loop_id);
		pkt->target_h = MSB(tq->loop_id);
	} else {
		pkt->target_h = LSB(tq->loop_id);
	}

	/* Set tag queue control flags */
	if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_HEAD_OF_Q) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_HTAG);
	} else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_ORDERED) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_OTAG);
	/* else if (fcp->fcp_cntl.cntl_qtype == FCP_QTYPE_SIMPLE) */
	} else {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_STAG);
	}

	/* Set ISP command timeout. */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Load SCSI CDB */
	ddi_rep_put8(req_q->req_ring.acc_handle, fcp->fcp_cdb,
	    pkt->scsi_cdb, MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/* No data phase: a type 2 entry with no segments suffices. */
	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(ha, "done\n");
		pkt->entry_type = IOCB_CMD_TYPE_2;
		ha->xioctl->IOControlRequests++;
		return;
	}

	/*
	 * Set transfer direction. Load Data segments.
	 */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_OUT);
		ha->xioctl->IOOutputRequests++;
		ha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags_l = (uint8_t)
		    (pkt->control_flags_l | CF_DATA_IN);
		ha->xioctl->IOInputRequests++;
		ha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->byte_count,
	    fcp->fcp_data_len);

	/* Load command data segment. */
	cp = sp->pkt->pkt_data_cookie;

	if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
		/* Type 3: 64-bit DSDs (address lo, address hi, length). */
		pkt3->entry_type = IOCB_CMD_TYPE_3;
		cnt = CMD_TYPE_3_DATA_SEGMENTS;

		ptr32 = (uint32_t *)&pkt3->dseg;
		while (cnt && seg_cnt) {
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    cp->dmac_address);
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    cp->dmac_notused);
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    (uint32_t)cp->dmac_size);
			seg_cnt--;
			cnt--;
			cp++;
		}
	} else {
		/* Type 2: 32-bit DSDs (address, length). */
		pkt->entry_type = IOCB_CMD_TYPE_2;
		cnt = CMD_TYPE_2_DATA_SEGMENTS;

		ptr32 = (uint32_t *)&pkt->dseg;
		while (cnt && seg_cnt) {
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    cp->dmac_address);
			ddi_put32(req_q->req_ring.acc_handle, ptr32++,
			    (uint32_t)cp->dmac_size);
			seg_cnt--;
			cnt--;
			cp++;
		}
	}

	/*
	 * Build continuation packets for any cookies that did not fit
	 * in the command entry itself.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, req_q, cp, seg_cnt,
		    (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
	}

	QL_PRINT_3(ha, "done\n");
}
579
580 /*
581 * ql_continuation_iocb
582 * Setup of continuation IOCB.
583 *
584 * Input:
585 * ha: adapter state pointer.
586 * req_q: request queue structure pointer.
587 * cp: cookie list pointer.
588 * seg_cnt: number of segments.
589 * addr64: 64 bit addresses.
590 *
591 * Context:
592 * Interrupt or Kernel context, no mailbox commands allowed.
593 */
594 /* ARGSUSED */
static void
ql_continuation_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q,
    ddi_dma_cookie_t *cp, uint16_t seg_cnt, boolean_t addr64)
{
	cont_entry_t *pkt;
	cont_type_1_entry_t *pkt1;
	uint64_t *ptr64;
	uint32_t *ptr32, cnt;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Build continuation packets.
	 * NOTE: the caller (ql_start_iocb) accounted for these entries in
	 * sp->req_cnt and holds the ring lock; this routine only fills the
	 * entries — it does not ring the doorbell.
	 */
	while (seg_cnt) {
		/* Sync the previously built entry for the device. */
		(void) ddi_dma_sync(req_q->req_ring.dma_handle,
		    (off_t)(req_q->req_ring_index * REQUEST_ENTRY_SIZE),
		    REQUEST_ENTRY_SIZE, DDI_DMA_SYNC_FORDEV);

		/* Adjust ring pointer, and deal with wrap. */
		req_q->req_ring_index++;
		if (req_q->req_ring_index == REQUEST_ENTRY_CNT) {
			req_q->req_ring_index = 0;
			req_q->req_ring_ptr = req_q->req_ring.bp;
		} else {
			req_q->req_ring_ptr++;
		}
		pkt = (cont_entry_t *)req_q->req_ring_ptr;
		pkt1 = (cont_type_1_entry_t *)req_q->req_ring_ptr;

		/* Zero out packet (one 64-byte entry). */
		ptr64 = (uint64_t *)pkt;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64++ = 0;
		*ptr64++ = 0; *ptr64 = 0;

		/*
		 * Build continuation packet.
		 */
		pkt->entry_count = 1;
		pkt->sys_define = (uint8_t)req_q->req_ring_index;
		if (addr64) {
			/* Type 1: 64-bit DSDs (addr lo, addr hi, length). */
			pkt1->entry_type = CONTINUATION_TYPE_1;
			cnt = CONT_TYPE_1_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt1->dseg;
			while (cnt && seg_cnt) {
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    cp->dmac_notused);
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		} else {
			/* Type 0: 32-bit DSDs (address, length). */
			pkt->entry_type = CONTINUATION_TYPE_0;
			cnt = CONT_TYPE_0_DATA_SEGMENTS;
			ptr32 = (uint32_t *)&pkt->dseg;
			while (cnt && seg_cnt) {
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    cp->dmac_address);
				ddi_put32(req_q->req_ring.acc_handle, ptr32++,
				    (uint32_t)cp->dmac_size);
				seg_cnt--;
				cnt--;
				cp++;
			}
		}

		QL_PRINT_5(ha, "packet:\n");
		QL_DUMP_5((uint8_t *)pkt, 8, REQUEST_ENTRY_SIZE);
	}

	QL_PRINT_3(ha, "done\n");
}
674
675 /*
676 * ql_command_24xx_iocb
677 * Setup of ISP24xx command IOCB.
678 *
679 * Input:
680 * ha: adapter state pointer.
681 * req_q: request queue structure pointer.
682 * sp: srb structure pointer.
683 * arg: request queue packet.
684 *
685 * Context:
686 * Interrupt or Kernel context, no mailbox commands allowed.
687 */
void
ql_command_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q,
    ql_srb_t *sp, void *arg)
{
	ddi_dma_cookie_t *cp;
	uint32_t *ptr32, cnt;
	uint16_t seg_cnt;
	fcp_cmd_t *fcp = sp->fcp;
	ql_tgt_t *tq = sp->lun_queue->target_queue;
	cmd7_24xx_entry_t *pkt = arg;
	ql_adapter_state_t *pha = ha->pha;
	fcp_ent_addr_t *fcp_ent_addr;

	QL_PRINT_3(ha, "started\n");

	/*
	 * Multi-cookie data transfers with a scatter/gather DMA buffer
	 * are built as command type 6 (external DSD list) instead.
	 */
	if (fcp->fcp_data_len != 0 && sp->sg_dma.dma_handle != NULL &&
	    sp->pkt->pkt_data_cookie_cnt > 1) {
		ql_cmd_24xx_type_6_iocb(ha, req_q, sp, arg);
		QL_PRINT_3(ha, "cmd6 exit\n");
		return;
	}

	pkt->entry_type = IOCB_CMD_TYPE_7;

	/* Set LUN number (byte-swizzled FCP entity address). */
	fcp_ent_addr = (fcp_ent_addr_t *)&sp->lun_queue->lun_addr;
	pkt->fcp_lun[2] = lobyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[3] = hibyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[0] = lobyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[1] = hibyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[6] = lobyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[7] = hibyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[4] = lobyte(fcp_ent_addr->ent_addr_3);
	pkt->fcp_lun[5] = hibyte(fcp_ent_addr->ent_addr_3);

	/* Set N_port handle */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID (24-bit D_ID, little-endian byte order). */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout.
	 * Only set when below 0x1999; otherwise the field is left zero —
	 * presumably 0 means "no firmware timeout". TODO confirm against
	 * the firmware interface spec.
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB, byte-swapped to big-endian per 32-bit word. */
	ddi_rep_put8(req_q->req_ring.acc_handle, fcp->fcp_cdb, pkt->scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);
	for (cnt = 0; cnt < MAX_CMDSZ; cnt += 4) {
		ql_chg_endian((uint8_t *)&pkt->scsi_cdb + cnt, 4);
	}

	/*
	 * Set tag queue control flags
	 * Note:
	 * Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 * problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		pkt->task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		pkt->task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		pkt->task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		pkt->task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		pkt->task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	/* No data phase: done after the common header. */
	if (fcp->fcp_data_len == 0) {
		QL_PRINT_3(ha, "done\n");
		pha->xioctl->IOControlRequests++;
		return;
	}

	/* Set transfer direction. */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/* Set data segment count. */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);

	/* Load total byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);

	/*
	 * Load command data segment — a type 7 entry carries exactly
	 * one 64-bit DSD (addr lo, addr hi, length).
	 */
	ptr32 = (uint32_t *)&pkt->dseg;
	cp = sp->pkt->pkt_data_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for remaining cookies
	 * (always 64-bit DSDs on 24xx).
	 */
	if (seg_cnt) {
		ql_continuation_iocb(pha, req_q, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(ha, "done\n");
}
815
816 /*
817 * ql_cmd_24xx_type_6_iocb
818 * Setup of ISP24xx command type 6 IOCB.
819 *
820 * Input:
821 * ha: adapter state pointer.
822 * req_q: request queue structure pointer.
823 * sp: srb structure pointer.
824 * arg: request queue packet.
825 *
826 * Context:
827 * Interrupt or Kernel context, no mailbox commands allowed.
828 */
static void
ql_cmd_24xx_type_6_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q,
    ql_srb_t *sp, void *arg)
{
	uint64_t addr;
	ddi_dma_cookie_t *cp;
	uint32_t *ptr32;
	uint16_t seg_cnt;
	fcp_cmd_t *fcp = sp->fcp;
	ql_tgt_t *tq = sp->lun_queue->target_queue;
	cmd6_24xx_entry_t *pkt = arg;
	ql_adapter_state_t *pha = ha->pha;
	/*
	 * cmem holds the side DMA buffer used for a type 6 command: the
	 * FCP_CMND payload (cdma->cmd) followed by the external DSD
	 * cookie list (cdma->cookie_list).
	 */
	dma_mem_t *cmem = &sp->sg_dma;
	cmd6_2400_dma_t *cdma = cmem->bp;
	fcp_ent_addr_t *fcp_ent_addr;

	QL_PRINT_3(ha, "started\n");

	pkt->entry_type = IOCB_CMD_TYPE_6;

	bzero(cdma, sizeof (cmd6_2400_dma_t));

	/*
	 * Set LUN number (byte-swizzled FCP entity address), mirrored
	 * into both the IOCB and the FCP_CMND payload.
	 */
	fcp_ent_addr = (fcp_ent_addr_t *)&sp->lun_queue->lun_addr;
	pkt->fcp_lun[2] = cdma->cmd.fcp_lun[2] =
	    lobyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[3] = cdma->cmd.fcp_lun[3] =
	    hibyte(fcp_ent_addr->ent_addr_0);
	pkt->fcp_lun[0] = cdma->cmd.fcp_lun[0] =
	    lobyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[1] = cdma->cmd.fcp_lun[1] =
	    hibyte(fcp_ent_addr->ent_addr_1);
	pkt->fcp_lun[6] = cdma->cmd.fcp_lun[6] =
	    lobyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[7] = cdma->cmd.fcp_lun[7] =
	    hibyte(fcp_ent_addr->ent_addr_2);
	pkt->fcp_lun[4] = cdma->cmd.fcp_lun[4] =
	    lobyte(fcp_ent_addr->ent_addr_3);
	pkt->fcp_lun[5] = cdma->cmd.fcp_lun[5] =
	    hibyte(fcp_ent_addr->ent_addr_3);

	/* Set N_port handle */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->n_port_hdl, tq->loop_id);

	/* Set target ID (24-bit D_ID). */
	pkt->target_id[0] = tq->d_id.b.al_pa;
	pkt->target_id[1] = tq->d_id.b.area;
	pkt->target_id[2] = tq->d_id.b.domain;

	pkt->vp_index = ha->vp_index;

	/*
	 * Set ISP command timeout.
	 * Only set when below 0x1999 — presumably 0 means no firmware
	 * timeout; same convention as ql_command_24xx_iocb(). TODO confirm.
	 */
	if (sp->isp_timeout < 0x1999) {
		ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout,
		    sp->isp_timeout);
	}

	/* Load SCSI CDB into the FCP_CMND payload buffer. */
	ddi_rep_put8(cmem->acc_handle, fcp->fcp_cdb, cdma->cmd.scsi_cdb,
	    MAX_CMDSZ, DDI_DEV_AUTOINCR);

	/*
	 * Set tag queue control flags
	 * Note:
	 * Cannot copy fcp->fcp_cntl.cntl_qtype directly,
	 * problem with x86 in 32bit kernel mode
	 */
	switch (fcp->fcp_cntl.cntl_qtype) {
	case FCP_QTYPE_SIMPLE:
		cdma->cmd.task = TA_STAG;
		break;
	case FCP_QTYPE_HEAD_OF_Q:
		cdma->cmd.task = TA_HTAG;
		break;
	case FCP_QTYPE_ORDERED:
		cdma->cmd.task = TA_OTAG;
		break;
	case FCP_QTYPE_ACA_Q_TAG:
		cdma->cmd.task = TA_ACA;
		break;
	case FCP_QTYPE_UNTAGGED:
		cdma->cmd.task = TA_UNTAGGED;
		break;
	default:
		break;
	}

	/*
	 * FCP_CMND Payload Data Segment — points the firmware at the
	 * side buffer holding the FCP_CMND just built above.
	 */
	cp = cmem->cookies;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->cmnd_length,
	    sizeof (fcp_cmnd_t));
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmnd_address[0],
	    cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmnd_address[1],
	    cp->dmac_notused);

	/* Set transfer direction (IOCB flag includes external-DSD bit). */
	if (fcp->fcp_cntl.cntl_write_data) {
		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_WR);
		cdma->cmd.control_flags = CF_WR;
		pha->xioctl->IOOutputRequests++;
		pha->xioctl->IOOutputByteCnt += fcp->fcp_data_len;
	} else if (fcp->fcp_cntl.cntl_read_data) {
		pkt->control_flags = (uint8_t)(CF_DSD_PTR | CF_RD);
		cdma->cmd.control_flags = CF_RD;
		pha->xioctl->IOInputRequests++;
		pha->xioctl->IOInputByteCnt += fcp->fcp_data_len;
	}

	/*
	 * FCP_DATA Data Segment Descriptor — the external DSD list lives
	 * in the same side buffer, immediately after the FCP_CMND.
	 */
	addr = cp->dmac_laddress + sizeof (fcp_cmnd_t);
	ddi_put32(req_q->req_ring.acc_handle, &pkt->dseg.address[0], LSD(addr));
	ddi_put32(req_q->req_ring.acc_handle, &pkt->dseg.address[1], MSD(addr));

	/*
	 * Set data segment count.
	 * List length is 12 bytes per DSD plus 12 — presumably room for a
	 * terminator/sentinel entry; confirm against the firmware spec.
	 */
	seg_cnt = (uint16_t)sp->pkt->pkt_data_cookie_cnt;
	ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);
	ddi_put32(req_q->req_ring.acc_handle, &pkt->dseg.length,
	    seg_cnt * 12 + 12);

	/* Load total byte count (cdma->cmd.dl is stored big-endian). */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->total_byte_count,
	    fcp->fcp_data_len);
	ddi_put32(cmem->acc_handle, &cdma->cmd.dl, (uint32_t)fcp->fcp_data_len);
	ql_chg_endian((uint8_t *)&cdma->cmd.dl, 4);

	/* Load command data segments into the external DSD list. */
	ptr32 = (uint32_t *)cdma->cookie_list;
	cp = sp->pkt->pkt_data_cookie;
	while (seg_cnt--) {
		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_address);
		ddi_put32(cmem->acc_handle, ptr32++, cp->dmac_notused);
		ddi_put32(cmem->acc_handle, ptr32++, (uint32_t)cp->dmac_size);
		cp++;
	}

	/* Sync the whole side DMA buffer for the device. */
	(void) ddi_dma_sync(cmem->dma_handle, 0, 0, DDI_DMA_SYNC_FORDEV);

	QL_PRINT_3(ha, "done\n");
}
974
975 /*
976 * ql_marker
977 * Function issues marker IOCB.
978 *
979 * Input:
980 * ha: adapter state pointer.
981 * loop_id: device loop ID
982 * lq: LUN queue pointer.
983 * type: marker modifier
984 *
985 * Returns:
986 * ql local function return status code.
987 *
988 * Context:
989 * Interrupt or Kernel context, no mailbox commands allowed.
990 */
int
ql_marker(ql_adapter_state_t *ha, uint16_t loop_id, ql_lun_t *lq,
    uint8_t type)
{
	mrk_entry_t *pkt;
	int rval;
	ql_request_q_t *req_q = ha->req_q[0];
	fcp_ent_addr_t *fcp_ent_addr;

	QL_PRINT_3(ha, "started\n");

	/*
	 * ql_req_pkt returns a zeroed entry with the ring lock held;
	 * ql_isp_cmd below submits it and releases the lock.
	 */
	rval = ql_req_pkt(ha, req_q, (request_t **)&pkt);
	if (rval == QL_SUCCESS) {
		pkt->entry_type = MARKER_TYPE;

		if (CFG_IST(ha, CFG_ISP_FW_TYPE_2)) {
			/* 24xx-style marker layout. */
			marker_24xx_entry_t *pkt24 =
			    (marker_24xx_entry_t *)pkt;

			pkt24->modifier = type;

			/* Set LUN number (byte-swizzled entity address). */
			if (lq) {
				fcp_ent_addr = (fcp_ent_addr_t *)&lq->lun_addr;
				pkt24->fcp_lun[2] =
				    lobyte(fcp_ent_addr->ent_addr_0);
				pkt24->fcp_lun[3] =
				    hibyte(fcp_ent_addr->ent_addr_0);
				pkt24->fcp_lun[0] =
				    lobyte(fcp_ent_addr->ent_addr_1);
				pkt24->fcp_lun[1] =
				    hibyte(fcp_ent_addr->ent_addr_1);
				pkt24->fcp_lun[6] =
				    lobyte(fcp_ent_addr->ent_addr_2);
				pkt24->fcp_lun[7] =
				    hibyte(fcp_ent_addr->ent_addr_2);
				pkt24->fcp_lun[4] =
				    lobyte(fcp_ent_addr->ent_addr_3);
				pkt24->fcp_lun[5] =
				    hibyte(fcp_ent_addr->ent_addr_3);
			}

			pkt24->vp_index = ha->vp_index;

			/* Set N_port handle */
			ddi_put16(req_q->req_ring.acc_handle,
			    &pkt24->n_port_hdl, loop_id);

		} else {
			/* Legacy (pre-24xx) marker layout. */
			pkt->modifier = type;

			if (lq) {
				pkt->lun_l = LSB(lq->lun_no);
				pkt->lun_h = MSB(lq->lun_no);
			}

			if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
				pkt->target_l = LSB(loop_id);
				pkt->target_h = MSB(loop_id);
			} else {
				pkt->target_h = LSB(loop_id);
			}
		}

		/* Issue command to ISP (also releases the ring lock). */
		ql_isp_cmd(ha, req_q);
	}

	if (rval != QL_SUCCESS) {
		EL(ha, "failed, rval = %xh\n", rval);
	} else {
		/*EMPTY*/
		QL_PRINT_3(ha, "done\n");
	}
	return (rval);
}
1067
1068 /*
1069 * ql_ms_iocb
1070 * Setup of name/management server IOCB.
1071 *
1072 * Input:
1073 * ha: adapter state pointer.
1074 * req_q: request queue structure pointer.
1075 * sp: srb structure pointer.
1076 * arg: request queue packet.
1077 *
1078 * Context:
1079 * Interrupt or Kernel context, no mailbox commands allowed.
1080 */
void
ql_ms_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
    void *arg)
{
	ddi_dma_cookie_t *cp;
	uint32_t *ptr32;
	uint16_t seg_cnt;
	ql_tgt_t *tq = sp->lun_queue->target_queue;
	ms_entry_t *pkt = arg;

	QL_PRINT_3(ha, "started\n");
	QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
	/*
	 * Build command packet.
	 */
	pkt->entry_type = MS_TYPE;

	/* Set loop ID (extended interface carries a 16-bit loop ID). */
	if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
		pkt->loop_id_l = LSB(tq->loop_id);
		pkt->loop_id_h = MSB(tq->loop_id);
	} else {
		pkt->loop_id_h = LSB(tq->loop_id);
	}

	/* Set ISP command timeout. */
	ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout, sp->isp_timeout);

	/* Set cmd data segment count — command is always one segment. */
	pkt->cmd_dseg_count_l = 1;

	/* Set total data segment count: 1 command + all response cookies. */
	seg_cnt = (uint16_t)(sp->pkt->pkt_resp_cookie_cnt + 1);
	ddi_put16(req_q->req_ring.acc_handle, &pkt->total_dseg_count, seg_cnt);

	/* Load ct cmd byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->cmd_byte_count,
	    (uint32_t)sp->pkt->pkt_cmdlen);

	/* Load ct rsp byte count. */
	ddi_put32(req_q->req_ring.acc_handle, &pkt->resp_byte_count,
	    (uint32_t)sp->pkt->pkt_rsplen);

	/* Load MS command data segment (64-bit DSD). */
	ptr32 = (uint32_t *)&pkt->dseg;
	cp = sp->pkt->pkt_cmd_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
	seg_cnt--;

	/* Load first MS response data segment into the entry itself. */
	cp = sp->pkt->pkt_resp_cookie;
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
	ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
	ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
	seg_cnt--;
	cp++;

	/*
	 * Build continuation packets for any remaining response cookies.
	 */
	if (seg_cnt) {
		ql_continuation_iocb(ha, req_q, cp, seg_cnt, B_TRUE);
	}

	QL_PRINT_3(ha, "done\n");
}
1149
1150 /*
1151 * ql_ms_24xx_iocb
1152 * Setup of name/management server IOCB.
1153 *
1154 * Input:
1155 * ha: adapter state pointer.
1156 * req_q: request queue structure pointer.
1157 * sp: srb structure pointer.
1158 * arg: request queue packet.
1159 *
1160 * Context:
1161 * Interrupt or Kernel context, no mailbox commands allowed.
1162 */
1163 void
1164 ql_ms_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
1165 void *arg)
1166 {
1167 ddi_dma_cookie_t *cp;
1168 uint32_t *ptr32;
1169 uint16_t seg_cnt;
1170 ql_tgt_t *tq = sp->lun_queue->target_queue;
1171 ct_passthru_entry_t *pkt = arg;
1172 ql_adapter_state_t *pha = ha->pha;
1173
1174 QL_PRINT_3(ha, "started\n");
1175 QL_DUMP_3(sp->pkt->pkt_cmd, 8, sp->pkt->pkt_cmdlen);
1176 /*
1177 * Build command packet.
1178 */
1179 pkt->entry_type = CT_PASSTHRU_TYPE;
1180
1181 /* Set loop ID */
1182 ddi_put16(req_q->req_ring.acc_handle, &pkt->n_port_hdl, tq->loop_id);
1183
1184 pkt->vp_index = ha->vp_index;
1185
1186 /* Set ISP command timeout. */
1187 if (sp->isp_timeout < 0x1999) {
1188 ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout,
1189 sp->isp_timeout);
1190 }
1191
1192 /* Set cmd/response data segment counts. */
1193 ddi_put16(req_q->req_ring.acc_handle, &pkt->cmd_dseg_count, 1);
1194 seg_cnt = (uint16_t)sp->pkt->pkt_resp_cookie_cnt;
1195 ddi_put16(req_q->req_ring.acc_handle, &pkt->resp_dseg_count, seg_cnt);
1196
1197 /* Load ct cmd byte count. */
1198 ddi_put32(req_q->req_ring.acc_handle, &pkt->cmd_byte_count,
1199 (uint32_t)sp->pkt->pkt_cmdlen);
1200
1201 /* Load ct rsp byte count. */
1202 ddi_put32(req_q->req_ring.acc_handle, &pkt->resp_byte_count,
1203 (uint32_t)sp->pkt->pkt_rsplen);
1204
1205 /* Load MS command entry data segments. */
1206 ptr32 = (uint32_t *)&pkt->dseg;
1207 cp = sp->pkt->pkt_cmd_cookie;
1208 ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
1209 ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
1210 ddi_put32(req_q->req_ring.acc_handle, ptr32++, (uint32_t)cp->dmac_size);
1211
1212 /* Load MS response entry data segments. */
1213 cp = sp->pkt->pkt_resp_cookie;
1214 ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
1215 ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
1216 ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
1217 seg_cnt--;
1218 cp++;
1219
1220 /*
1221 * Build continuation packets.
1222 */
1223 if (seg_cnt) {
1224 ql_continuation_iocb(pha, req_q, cp, seg_cnt, B_TRUE);
1225 }
1226
1227 QL_PRINT_3(ha, "done\n");
1228 }
1229
1230 /*
1231 * ql_ip_iocb
1232 * Setup of IP IOCB.
1233 *
1234 * Input:
1235 * ha: adapter state pointer.
1236 * req_q: request queue structure pointer.
1237 * sp: srb structure pointer.
1238 * arg: request queue packet.
1239 *
1240 * Context:
1241 * Interrupt or Kernel context, no mailbox commands allowed.
1242 */
1243 void
1244 ql_ip_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
1245 void *arg)
1246 {
1247 ddi_dma_cookie_t *cp;
1248 uint32_t *ptr32, cnt;
1249 uint16_t seg_cnt;
1250 ql_tgt_t *tq = sp->lun_queue->target_queue;
1251 ip_entry_t *pkt = arg;
1252 ip_a64_entry_t *pkt64 = arg;
1253
1254 QL_PRINT_3(ha, "started\n");
1255
1256 /* Set loop ID */
1257 if (CFG_IST(ha, CFG_EXT_FW_INTERFACE)) {
1258 pkt->loop_id_l = LSB(tq->loop_id);
1259 pkt->loop_id_h = MSB(tq->loop_id);
1260 } else {
1261 pkt->loop_id_h = LSB(tq->loop_id);
1262 }
1263
1264 /* Set control flags */
1265 pkt->control_flags_l = BIT_6;
1266 if (sp->pkt->pkt_tran_flags & FC_TRAN_HI_PRIORITY) {
1267 pkt->control_flags_h = BIT_7;
1268 }
1269
1270 /* Set ISP command timeout. */
1271 ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout, sp->isp_timeout);
1272
1273 /* Set data segment count. */
1274 seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
1275 /* Load total byte count. */
1276 ddi_put32(req_q->req_ring.acc_handle, &pkt->byte_count,
1277 (uint32_t)sp->pkt->pkt_cmdlen);
1278 ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);
1279
1280 /*
1281 * Build command packet.
1282 */
1283
1284 /* Load command entry data segments. */
1285 cp = sp->pkt->pkt_cmd_cookie;
1286
1287 if (CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)) {
1288 pkt64->entry_type = IP_A64_TYPE;
1289 cnt = IP_A64_DATA_SEGMENTS;
1290 ptr32 = (uint32_t *)&pkt64->dseg;
1291 while (cnt && seg_cnt) {
1292 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1293 cp->dmac_address);
1294 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1295 cp->dmac_notused);
1296 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1297 (uint32_t)cp->dmac_size);
1298 seg_cnt--;
1299 cnt--;
1300 cp++;
1301 }
1302 } else {
1303 pkt->entry_type = IP_TYPE;
1304 cnt = IP_DATA_SEGMENTS;
1305 ptr32 = (uint32_t *)&pkt->dseg;
1306 while (cnt && seg_cnt) {
1307 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1308 cp->dmac_address);
1309 ddi_put32(req_q->req_ring.acc_handle, ptr32++,
1310 (uint32_t)cp->dmac_size);
1311 seg_cnt--;
1312 cnt--;
1313 cp++;
1314 }
1315 }
1316
1317 /*
1318 * Build continuation packets.
1319 */
1320 if (seg_cnt) {
1321 ql_continuation_iocb(ha, req_q, cp, seg_cnt,
1322 (boolean_t)(CFG_IST(ha, CFG_ENABLE_64BIT_ADDRESSING)));
1323 }
1324
1325 QL_PRINT_3(ha, "done\n");
1326 }
1327
1328 /*
1329 * ql_ip_24xx_iocb
1330 * Setup of IP IOCB for ISP24xx.
1331 *
1332 * Input:
1333 * ha: adapter state pointer.
1334 * req_q: request queue structure pointer.
1335 * sp: srb structure pointer.
1336 * arg: request queue packet.
1337 *
1338 * Context:
1339 * Interrupt or Kernel context, no mailbox commands allowed.
1340 */
1341 void
1342 ql_ip_24xx_iocb(ql_adapter_state_t *ha, ql_request_q_t *req_q, ql_srb_t *sp,
1343 void *arg)
1344 {
1345 ddi_dma_cookie_t *cp;
1346 uint32_t *ptr32;
1347 uint16_t seg_cnt;
1348 ql_tgt_t *tq = sp->lun_queue->target_queue;
1349 ip_cmd_entry_t *pkt = arg;
1350
1351 pkt->entry_type = IP_CMD_TYPE;
1352
1353 QL_PRINT_3(ha, "started\n");
1354
1355 /* Set N_port handle */
1356 ddi_put16(req_q->req_ring.acc_handle, &pkt->hdl_status, tq->loop_id);
1357
1358 /* Set ISP command timeout. */
1359 if (sp->isp_timeout < 0x1999) {
1360 ddi_put16(req_q->req_ring.acc_handle, &pkt->timeout_hdl,
1361 sp->isp_timeout);
1362 }
1363
1364 /* Set data segment count. */
1365 seg_cnt = (uint16_t)sp->pkt->pkt_cmd_cookie_cnt;
1366 /* Load total byte count. */
1367 ddi_put32(req_q->req_ring.acc_handle, &pkt->byte_count,
1368 (uint32_t)sp->pkt->pkt_cmdlen);
1369 ddi_put16(req_q->req_ring.acc_handle, &pkt->dseg_count, seg_cnt);
1370
1371 /* Set control flags */
1372 ddi_put16(req_q->req_ring.acc_handle, &pkt->control_flags,
1373 (uint16_t)(BIT_0));
1374
1375 /* Set frame header control flags */
1376 ddi_put16(req_q->req_ring.acc_handle, &pkt->frame_hdr_cntrl_flgs,
1377 (uint16_t)(IPCF_LAST_SEQ | IPCF_FIRST_SEQ));
1378
1379 /* Load command data segment. */
1380 ptr32 = (uint32_t *)&pkt->dseg;
1381 cp = sp->pkt->pkt_cmd_cookie;
1382 ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_address);
1383 ddi_put32(req_q->req_ring.acc_handle, ptr32++, cp->dmac_notused);
1384 ddi_put32(req_q->req_ring.acc_handle, ptr32, (uint32_t)cp->dmac_size);
1385 seg_cnt--;
1386 cp++;
1387
1388 /*
1389 * Build continuation packets.
1390 */
1391 if (seg_cnt) {
1392 ql_continuation_iocb(ha, req_q, cp, seg_cnt, B_TRUE);
1393 }
1394
1395 QL_PRINT_3(ha, "done\n");
1396 }
1397
1398 /*
1399 * ql_isp_rcvbuf
1400 * Locates free buffers and places it on the receive buffer queue.
1401 *
1402 * Input:
1403 * ha = adapter state pointer.
1404 *
1405 * Context:
1406 * Interrupt or Kernel context, no mailbox commands allowed.
1407 */
1408 void
1409 ql_isp_rcvbuf(ql_adapter_state_t *ha)
1410 {
1411 rcvbuf_t *container;
1412 uint16_t rcv_q_cnt;
1413 uint16_t index = 0;
1414 uint16_t index1 = 1;
1415 int debounce_count = QL_MAX_DEBOUNCE;
1416 ql_srb_t *sp;
1417 fc_unsol_buf_t *ubp;
1418 int ring_updated = FALSE;
1419
1420 if (CFG_IST(ha, CFG_CTRL_24XX)) {
1421 ql_isp24xx_rcvbuf(ha);
1422 return;
1423 }
1424
1425 QL_PRINT_3(ha, "started\n");
1426
1427 /* Acquire adapter state lock. */
1428 ADAPTER_STATE_LOCK(ha);
1429
1430 /* Calculate number of free receive buffer entries. */
1431 index = RD16_IO_REG(ha, mailbox_out[8]);
1432 do {
1433 index1 = RD16_IO_REG(ha, mailbox_out[8]);
1434 if (index1 == index) {
1435 break;
1436 } else {
1437 index = index1;
1438 }
1439 } while (debounce_count--);
1440
1441 if (debounce_count < 0) {
1442 /* This should never happen */
1443 EL(ha, "max mb8 debounce retries exceeded\n");
1444 }
1445
1446 rcv_q_cnt = (uint16_t)(ha->rcvbuf_ring_index < index ?
1447 index - ha->rcvbuf_ring_index : RCVBUF_CONTAINER_CNT -
1448 (ha->rcvbuf_ring_index - index));
1449
1450 if (rcv_q_cnt == RCVBUF_CONTAINER_CNT) {
1451 rcv_q_cnt--;
1452 }
1453
1454 /* Load all free buffers in ISP receive buffer ring. */
1455 index = 0;
1456 while (rcv_q_cnt > (uint16_t)0 && index < QL_UB_LIMIT) {
1457 /* Locate a buffer to give. */
1458 QL_UB_LOCK(ha);
1459 while (index < QL_UB_LIMIT) {
1460 ubp = ha->ub_array[index];
1461 if (ubp != NULL) {
1462 sp = ubp->ub_fca_private;
1463 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1464 (ha->flags & IP_INITIALIZED) &&
1465 (sp->flags & SRB_UB_IN_FCA) &&
1466 (!(sp->flags & (SRB_UB_IN_ISP |
1467 SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1468 SRB_UB_ACQUIRED)))) {
1469 sp->flags |= SRB_UB_IN_ISP;
1470 break;
1471 }
1472 }
1473 index++;
1474 }
1475
1476 if (index < QL_UB_LIMIT) {
1477 rcv_q_cnt--;
1478 index++;
1479 container = ha->rcvbuf_ring_ptr;
1480
1481 /*
1482 * Build container.
1483 */
1484 ddi_put32(ha->rcv_ring.acc_handle,
1485 (uint32_t *)(void *)&container->bufp[0],
1486 sp->ub_buffer.cookie.dmac_address);
1487
1488 ddi_put32(ha->rcv_ring.acc_handle,
1489 (uint32_t *)(void *)&container->bufp[1],
1490 sp->ub_buffer.cookie.dmac_notused);
1491
1492 ddi_put16(ha->rcv_ring.acc_handle, &container->handle,
1493 LSW(sp->handle));
1494
1495 ha->ub_outcnt++;
1496
1497 /* Adjust ring index. */
1498 ha->rcvbuf_ring_index++;
1499 if (ha->rcvbuf_ring_index == RCVBUF_CONTAINER_CNT) {
1500 ha->rcvbuf_ring_index = 0;
1501 ha->rcvbuf_ring_ptr = ha->rcv_ring.bp;
1502 } else {
1503 ha->rcvbuf_ring_ptr++;
1504 }
1505
1506 ring_updated = TRUE;
1507 }
1508 QL_UB_UNLOCK(ha);
1509 }
1510
1511 if (ring_updated) {
1512 /* Sync queue. */
1513 (void) ddi_dma_sync(ha->rcv_ring.dma_handle, 0,
1514 (size_t)RCVBUF_QUEUE_SIZE, DDI_DMA_SYNC_FORDEV);
1515
1516 /* Set chip new ring index. */
1517 WRT16_IO_REG(ha, mailbox_in[8], ha->rcvbuf_ring_index);
1518 }
1519
1520 /* Release adapter state lock. */
1521 ADAPTER_STATE_UNLOCK(ha);
1522
1523 QL_PRINT_3(ha, "done\n");
1524 }
1525
1526 /*
1527 * ql_isp24xx_rcvbuf
1528 * Locates free buffers and send it to adapter.
1529 *
1530 * Input:
1531 * ha = adapter state pointer.
1532 *
1533 * Context:
1534 * Interrupt or Kernel context, no mailbox commands allowed.
1535 */
1536 static void
1537 ql_isp24xx_rcvbuf(ql_adapter_state_t *ha)
1538 {
1539 rcvbuf_t *container;
1540 uint16_t index;
1541 ql_srb_t *sp;
1542 fc_unsol_buf_t *ubp;
1543 int rval;
1544 ip_buf_pool_entry_t *pkt = NULL;
1545 ql_request_q_t *req_q = ha->req_q[0];
1546
1547 QL_PRINT_3(ha, "started\n");
1548
1549 for (;;) {
1550 /* Locate a buffer to give. */
1551 QL_UB_LOCK(ha);
1552 for (index = 0; index < QL_UB_LIMIT; index++) {
1553 ubp = ha->ub_array[index];
1554 if (ubp != NULL) {
1555 sp = ubp->ub_fca_private;
1556 if ((sp->ub_type == FC_TYPE_IS8802_SNAP) &&
1557 (ha->flags & IP_INITIALIZED) &&
1558 (sp->flags & SRB_UB_IN_FCA) &&
1559 (!(sp->flags & (SRB_UB_IN_ISP |
1560 SRB_UB_FREE_REQUESTED | SRB_UB_CALLBACK |
1561 SRB_UB_ACQUIRED)))) {
1562 ha->ub_outcnt++;
1563 sp->flags |= SRB_UB_IN_ISP;
1564 break;
1565 }
1566 }
1567 }
1568 QL_UB_UNLOCK(ha);
1569 if (index == QL_UB_LIMIT) {
1570 break;
1571 }
1572
1573 /* Get IOCB packet for buffers. */
1574 if (pkt == NULL) {
1575 rval = ql_req_pkt(ha, req_q, (request_t **)&pkt);
1576 if (rval != QL_SUCCESS) {
1577 EL(ha, "failed, ql_req_pkt=%x\n", rval);
1578 QL_UB_LOCK(ha);
1579 ha->ub_outcnt--;
1580 sp->flags &= ~SRB_UB_IN_ISP;
1581 QL_UB_UNLOCK(ha);
1582 break;
1583 }
1584 pkt->entry_type = IP_BUF_POOL_TYPE;
1585 container = &pkt->buffers[0];
1586 }
1587
1588 /*
1589 * Build container.
1590 */
1591 ddi_put32(req_q->req_ring.acc_handle, &container->bufp[0],
1592 sp->ub_buffer.cookie.dmac_address);
1593 ddi_put32(req_q->req_ring.acc_handle, &container->bufp[1],
1594 sp->ub_buffer.cookie.dmac_notused);
1595 ddi_put16(req_q->req_ring.acc_handle, &container->handle,
1596 LSW(sp->handle));
1597
1598 pkt->buffer_count++;
1599 container++;
1600
1601 if (pkt->buffer_count == IP_POOL_BUFFERS) {
1602 ql_isp_cmd(ha, req_q);
1603 pkt = NULL;
1604 }
1605 }
1606
1607 if (pkt != NULL) {
1608 ql_isp_cmd(ha, req_q);
1609 }
1610
1611 QL_PRINT_3(ha, "done\n");
1612 }