MFV: illumos-gate@bbb9d5d65bf8372aae4b8821c80e218b8b832846
9994 cxgbe t4nex: Handle get_fl_payload() alloc failures
9995 cxgbe t4_devo_attach() should initialize ->sfl
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: John Levon <john.levon@joyent.com>
9484 cxgbe should clean TX descriptors in timely manner
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Ryan Zezeski <rpz@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>

*** 691,701 ****
  /* Deals with interrupts on the given ingress queue */
  /* ARGSUSED */
  uint_t
  t4_intr(caddr_t arg1, caddr_t arg2)
  {
- 	/* LINTED: E_BAD_PTR_CAST_ALIGN */
  	struct sge_iq *iq = (struct sge_iq *)arg2;
  	int state;
  
  	/* Right now receive polling is only enabled for MSI-X and
  	 * when we have enough msi-x vectors i.e no interrupt forwarding.
--- 691,700 ----
*** 774,787 ****
  		if (iq->polling && ((received_bytes + pkt_len) > budget))
  			goto done;
  
  		m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
! 		if (m == NULL) {
! 			panic("%s: line %d.", __func__,
! 			    __LINE__);
! 		}
  
  		iq->intr_next = iq->intr_params;
  		m->b_rptr += sc->sge.pktshift;
  		if (sc->params.tp.rx_pkt_encap)
  			/* It is enabled only in T6 config file */
--- 773,784 ----
  		if (iq->polling && ((received_bytes + pkt_len) > budget))
  			goto done;
  
  		m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
! 		if (m == NULL)
! 			goto done;
  
  		iq->intr_next = iq->intr_params;
  		m->b_rptr += sc->sge.pktshift;
  		if (sc->params.tp.rx_pkt_encap)
  			/* It is enabled only in T6 config file */
*** 807,820 ****
  				break;
  			}
  
  			m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
! 			if (m == NULL) {
! 				panic("%s: line %d.", __func__,
! 				    __LINE__);
! 			}
  
  		case X_RSPD_TYPE_CPL:
  			ASSERT(rss->opcode < NUM_CPL_CMDS);
  			sc->cpl_handler[rss->opcode](iq, rss, m);
  			break;
--- 804,816 ----
  				break;
  			}
  
  			m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
! 			if (m == NULL)
! 				goto done;
! 			/* FALLTHROUGH */
  
  		case X_RSPD_TYPE_CPL:
  			ASSERT(rss->opcode < NUM_CPL_CMDS);
  			sc->cpl_handler[rss->opcode](iq, rss, m);
  			break;
*** 859,868 ****
--- 855,865 ----
  	struct rsp_ctrl *ctrl;
  	const struct rss_header *rss;
  	int ndescs = 0, limit, fl_bufs_used = 0;
  	int rsp_type;
  	uint32_t lq;
+ 	int starved;
  	mblk_t *m;
  	STAILQ_HEAD(, sge_iq) iql = STAILQ_HEAD_INITIALIZER(iql);
  
  	limit = budget ? budget : iq->qsize / 8;
*** 885,897 ****
  			ASSERT(iq->flags & IQ_HAS_FL);
  
  			m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
  			if (m == NULL) {
! 				panic("%s: line %d.", __func__,
! 				    __LINE__);
  			}
  
  			/* FALLTHRU */
  		case X_RSPD_TYPE_CPL:
  			ASSERT(rss->opcode < NUM_CPL_CMDS);
--- 882,909 ----
  			ASSERT(iq->flags & IQ_HAS_FL);
  
  			m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
  			if (m == NULL) {
! 				/*
! 				 * Rearm the iq with a
! 				 * longer-than-default timer
! 				 */
! 				t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
! 				    V_INGRESSQID((u32)iq->cntxt_id) |
! 				    V_SEINTARM(V_QINTR_TIMER_IDX(SGE_NTIMERS-1)));
! 				if (fl_bufs_used > 0) {
! 					ASSERT(iq->flags & IQ_HAS_FL);
! 					FL_LOCK(fl);
! 					fl->needed += fl_bufs_used;
! 					starved = refill_fl(sc, fl, fl->cap / 8);
! 					FL_UNLOCK(fl);
! 					if (starved)
! 						add_fl_to_sfl(sc, fl);
  				}
+ 				return (0);
+ 			}
  
  			/* FALLTHRU */
  		case X_RSPD_TYPE_CPL:
  			ASSERT(rss->opcode < NUM_CPL_CMDS);
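As an aside, the pattern in the hunk above is: on allocation failure, stop servicing, re-arm the queue with the longest hold-off timer, and top up the free list so the frame can be retried later instead of panicking. Below is a minimal user-space sketch of that pattern; fake_iq, fake_fl, rearm_with_timer, refill and on_alloc_failure are hypothetical stand-ins for sge_iq, sge_fl, the A_SGE_PF_GTS doorbell write, refill_fl() and the service loop, not driver code.

    #include <stdbool.h>
    #include <stdio.h>

    struct fake_fl {
    	int needed;	/* buffers owed back to the hardware */
    	int cap;	/* ring capacity */
    };

    struct fake_iq {
    	int ndescs;	/* responses consumed but not yet acked */
    	struct fake_fl fl;
    };

    /* Stand-in for the A_SGE_PF_GTS write with V_QINTR_TIMER_IDX(...). */
    static void
    rearm_with_timer(struct fake_iq *iq, int timer_idx)
    {
    	printf("GTS doorbell: cidx_inc=%d timer_idx=%d\n",
    	    iq->ndescs, timer_idx);
    	iq->ndescs = 0;
    }

    /* Stand-in for refill_fl(); returns true if the list is still starved. */
    static bool
    refill(struct fake_fl *fl, int budget)
    {
    	int posted = (fl->needed < budget) ? fl->needed : budget;

    	fl->needed -= posted;
    	return (fl->needed >= fl->cap / 2);
    }

    /* On allocation failure: back off instead of panicking. */
    static int
    on_alloc_failure(struct fake_iq *iq, int fl_bufs_used)
    {
    	rearm_with_timer(iq, 5);	/* ~ SGE_NTIMERS - 1 */
    	if (fl_bufs_used > 0) {
    		iq->fl.needed += fl_bufs_used;
    		if (refill(&iq->fl, iq->fl.cap / 8))
    			printf("fl still starved; deferring to sfl\n");
    	}
    	return (0);
    }

    int
    main(void)
    {
    	struct fake_iq iq = { 3, { 64, 128 } };

    	return (on_alloc_failure(&iq, 16));
    }

Re-arming with the longest timer index (SGE_NTIMERS - 1 in the hunk) gives memory pressure time to ease before the queue interrupts again.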
*** 966,976 ****
  	t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS), V_CIDXINC(ndescs) |
  	    V_INGRESSQID((u32)iq->cntxt_id) | V_SEINTARM(iq->intr_next));
  
  	if (iq->flags & IQ_HAS_FL) {
- 		int starved;
  
  		FL_LOCK(fl);
  		fl->needed += fl_bufs_used;
  		starved = refill_fl(sc, fl, fl->cap / 4);
  		FL_UNLOCK(fl);
--- 978,987 ----
*** 1251,1260 ****
--- 1262,1272 ----
  static inline void
  init_fl(struct sge_fl *fl, uint16_t qsize)
  {
  	fl->qsize = qsize;
+ 	fl->allocb_fail = 0;
  }
  
  static inline void
  init_eq(struct adapter *sc, struct sge_eq *eq, uint16_t eqtype,
      uint16_t qsize, uint8_t tx_chan, uint16_t iqid)
*** 1685,1695 ****
  	c.op_to_vfn = BE_32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
  	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
  	    V_FW_EQ_ETH_CMD_VFN(0));
  	c.alloc_to_len16 = BE_32(F_FW_EQ_ETH_CMD_ALLOC |
  	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
! 	c.autoequiqe_to_viid = BE_32(V_FW_EQ_ETH_CMD_VIID(pi->viid));
  	c.fetchszm_to_iqid =
  	    BE_32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
  	    V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
  	    V_FW_EQ_ETH_CMD_IQID(eq->iqid));
  	c.dcaen_to_eqsize = BE_32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
--- 1697,1708 ----
  	c.op_to_vfn = BE_32(V_FW_CMD_OP(FW_EQ_ETH_CMD) | F_FW_CMD_REQUEST |
  	    F_FW_CMD_WRITE | F_FW_CMD_EXEC | V_FW_EQ_ETH_CMD_PFN(sc->pf) |
  	    V_FW_EQ_ETH_CMD_VFN(0));
  	c.alloc_to_len16 = BE_32(F_FW_EQ_ETH_CMD_ALLOC |
  	    F_FW_EQ_ETH_CMD_EQSTART | FW_LEN16(c));
! 	c.autoequiqe_to_viid = BE_32(F_FW_EQ_ETH_CMD_AUTOEQUIQE |
! 	    F_FW_EQ_ETH_CMD_AUTOEQUEQE | V_FW_EQ_ETH_CMD_VIID(pi->viid));
  	c.fetchszm_to_iqid =
  	    BE_32(V_FW_EQ_ETH_CMD_HOSTFCMODE(X_HOSTFCMODE_STATUS_PAGE) |
  	    V_FW_EQ_ETH_CMD_PCIECHN(eq->tx_chan) | F_FW_EQ_ETH_CMD_FETCHRO |
  	    V_FW_EQ_ETH_CMD_IQID(eq->iqid));
  	c.dcaen_to_eqsize = BE_32(V_FW_EQ_ETH_CMD_FBMIN(X_FETCHBURSTMIN_64B) |
*** 2328,2344 ****
  {
  	struct mblk_pair frame = {0};
  	struct rxbuf *rxb;
  	mblk_t *m = NULL;
  	uint_t nbuf = 0, len, copy, n;
! 	uint32_t cidx, offset;
  
  	/*
  	 * The SGE won't pack a new frame into the current buffer if the entire
  	 * payload doesn't fit in the remaining space. Move on to the next buf
  	 * in that case.
  	 */
  	if (fl->offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
  		fl->offset = 0;
  		if (++fl->cidx == fl->cap)
  			fl->cidx = 0;
  		nbuf++;
--- 2341,2359 ----
  {
  	struct mblk_pair frame = {0};
  	struct rxbuf *rxb;
  	mblk_t *m = NULL;
  	uint_t nbuf = 0, len, copy, n;
! 	uint32_t cidx, offset, rcidx, roffset;
  
  	/*
  	 * The SGE won't pack a new frame into the current buffer if the entire
  	 * payload doesn't fit in the remaining space. Move on to the next buf
  	 * in that case.
  	 */
+ 	rcidx = fl->cidx;
+ 	roffset = fl->offset;
  	if (fl->offset > 0 && len_newbuf & F_RSPD_NEWBUF) {
  		fl->offset = 0;
  		if (++fl->cidx == fl->cap)
  			fl->cidx = 0;
  		nbuf++;
*** 2348,2360 ****
  	len = G_RSPD_LEN(len_newbuf);	/* pktshift + payload length */
  	copy = (len <= fl->copy_threshold);
  	if (copy != 0) {
  		frame.head = m = allocb(len, BPRI_HI);
! 		if (m == NULL)
  			return (NULL);
  	}
  
  	while (len) {
  		rxb = fl->sdesc[cidx].rxb;
  		n = min(len, rxb->buf_size - offset);
--- 2363,2382 ----
  	len = G_RSPD_LEN(len_newbuf);	/* pktshift + payload length */
  	copy = (len <= fl->copy_threshold);
  	if (copy != 0) {
  		frame.head = m = allocb(len, BPRI_HI);
! 		if (m == NULL) {
! 			fl->allocb_fail++;
! 			cmn_err(CE_WARN,"%s: mbuf allocation failure "
! 			    "count = %llu", __func__,
! 			    (unsigned long long)fl->allocb_fail);
! 			fl->cidx = rcidx;
! 			fl->offset = roffset;
  			return (NULL);
  		}
+ 	}
  
  	while (len) {
  		rxb = fl->sdesc[cidx].rxb;
  		n = min(len, rxb->buf_size - offset);
*** 2365,2375 ****
  			bcopy(rxb->va + offset, m->b_wptr, n);
  		else {
  			m = desballoc((unsigned char *)rxb->va + offset,
  			    n, BPRI_HI, &rxb->freefunc);
  			if (m == NULL) {
! 				freemsg(frame.head);
  				return (NULL);
  			}
  			atomic_inc_uint(&rxb->ref_cnt);
  			if (frame.head != NULL)
  				frame.tail->b_cont = m;
--- 2387,2405 ----
  			bcopy(rxb->va + offset, m->b_wptr, n);
  		else {
  			m = desballoc((unsigned char *)rxb->va + offset,
  			    n, BPRI_HI, &rxb->freefunc);
  			if (m == NULL) {
! 				fl->allocb_fail++;
! 				cmn_err(CE_WARN,
! 				    "%s: mbuf allocation failure "
! 				    "count = %llu", __func__,
! 				    (unsigned long long)fl->allocb_fail);
! 				if (frame.head)
! 					freemsgchain(frame.head);
! 				fl->cidx = rcidx;
! 				fl->offset = roffset;
  				return (NULL);
  			}
  			atomic_inc_uint(&rxb->ref_cnt);
  			if (frame.head != NULL)
  				frame.tail->b_cont = m;
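As an aside, the two hunks above both rely on the rcidx/roffset saved at the top of get_fl_payload(): on any allocation failure the free-list consumer state is rewound, so nothing appears consumed and the same frame can be re-gathered on a later pass. A user-space sketch of that rewind pattern follows, assuming toy types; toy_fl and gather_frame are hypothetical, and malloc() stands in for allocb()/desballoc().

    #include <stdio.h>
    #include <stdlib.h>

    struct toy_fl {
    	unsigned int cidx;		/* consumer index */
    	unsigned int offset;		/* offset into the current buffer */
    	unsigned long long allocb_fail;	/* failure counter, as in sge_fl */
    };

    /* Gather one frame; on failure, rewind so nothing appears consumed. */
    static void *
    gather_frame(struct toy_fl *fl, size_t len)
    {
    	unsigned int rcidx = fl->cidx;		/* saved on entry */
    	unsigned int roffset = fl->offset;
    	void *m;

    	/* Pretend we advanced past the current buffer while gathering. */
    	fl->offset = 0;
    	fl->cidx++;

    	m = malloc(len);	/* stand-in for allocb()/desballoc() */
    	if (m == NULL) {
    		fl->allocb_fail++;
    		fprintf(stderr, "allocation failure count = %llu\n",
    		    fl->allocb_fail);
    		fl->cidx = rcidx;	/* rewind: retry later from here */
    		fl->offset = roffset;
    		return (NULL);
    	}
    	return (m);
    }

    int
    main(void)
    {
    	struct toy_fl fl = { 7, 128, 0 };

    	free(gather_frame(&fl, 1500));
    	return (0);
    }

Because the saved state is restored before returning NULL, the caller can simply bail out (the goto done / return (0) paths above) and the descriptors remain available on the next service pass.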
*** 3344,3368 ****
--- 3374,3413 ----
  	 * Deduct the number of descriptors posted
  	 */
  	fl->pending -= ndesc * 8;
  }
  
+ static void
+ tx_reclaim_task(void *arg)
+ {
+ 	struct sge_txq *txq = arg;
+ 
+ 	TXQ_LOCK(txq);
+ 	reclaim_tx_descs(txq, txq->eq.qsize);
+ 	TXQ_UNLOCK(txq);
+ }
+ 
  /* ARGSUSED */
  static int
  handle_sge_egr_update(struct sge_iq *iq, const struct rss_header *rss,
      mblk_t *m)
  {
  	const struct cpl_sge_egr_update *cpl = (const void *)(rss + 1);
  	unsigned int qid = G_EGR_QID(ntohl(cpl->opcode_qid));
  	struct adapter *sc = iq->adapter;
  	struct sge *s = &sc->sge;
+ 	struct sge_eq *eq;
  	struct sge_txq *txq;
  
  	txq = (void *)s->eqmap[qid - s->eq_start];
+ 	eq = &txq->eq;
  	txq->qflush++;
  	t4_mac_tx_update(txq->port, txq);
+ 	ddi_taskq_dispatch(sc->tq[eq->tx_chan], tx_reclaim_task,
+ 	    (void *)txq, DDI_NOSLEEP);
+ 
  	return (0);
  }
  
  static int
  handle_fw_rpl(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
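As an aside, this last hunk is the heart of the 9484 fix: the egress-update handler no longer leaves TX descriptors to accumulate but dispatches tx_reclaim_task() to a task queue so they are cleaned promptly. A user-space sketch of that deferred-reclaim idea follows, with pthreads as a hypothetical stand-in for ddi_taskq_dispatch() and a toy txq type.

    #include <pthread.h>
    #include <stdio.h>

    struct toy_txq {
    	pthread_mutex_t lock;
    	int avail;		/* descriptors awaiting reclamation */
    };

    /* Analogue of tx_reclaim_task(): lock the queue, reclaim, unlock. */
    static void *
    reclaim_task(void *arg)
    {
    	struct toy_txq *txq = arg;

    	pthread_mutex_lock(&txq->lock);
    	printf("reclaimed %d tx descriptors\n", txq->avail);
    	txq->avail = 0;
    	pthread_mutex_unlock(&txq->lock);
    	return (NULL);
    }

    /* Analogue of handle_sge_egr_update(): dispatch, don't reclaim inline. */
    int
    main(void)
    {
    	struct toy_txq txq = { PTHREAD_MUTEX_INITIALIZER, 42 };
    	pthread_t t;

    	if (pthread_create(&t, NULL, reclaim_task, &txq) != 0)
    		return (1);
    	(void) pthread_join(t, NULL);
    	return (0);
    }

Dispatching with DDI_NOSLEEP in the real driver keeps the interrupt path non-blocking; the reclaim itself then runs in taskq context where taking TXQ_LOCK is safe.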