NEX-1890 update oce from source provided by Emulex

Previous version:

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/* Copyright © 2003-2011 Emulex. All rights reserved. */

/*
 * Source file containing the implementation of the MailBox queue handling
 * and related helper functions
 */

#include <oce_impl.h>
/*
 * function to drain an MCQ and process its CQEs
 *
 * arg - pointer to the MQ whose completion queue is to be drained
 *
 * return the number of CQEs processed
 */
uint16_t
oce_drain_mq_cq(void *arg)
{
        struct oce_mq_cqe *cqe = NULL;
        uint16_t num_cqe = 0;
        link_state_t link_status;
        struct oce_async_cqe_link_state *acqe;
        struct oce_mq *mq;
        struct oce_cq  *cq;
        struct oce_dev *dev;

        /* process CQEs until we reach one that is not valid */
        mq = (struct oce_mq *)arg;
        cq = mq->cq;
        dev = mq->parent;
        mutex_enter(&mq->lock);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
        while (cqe->u0.dw[3]) {
                DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
                if (cqe->u0.s.async_event) {
                        acqe = (struct oce_async_cqe_link_state *)cqe;
                        if (acqe->u0.s.event_code ==
                            ASYNC_EVENT_CODE_LINK_STATE) {
                                /*
                                 * don't care whether it is logical or not,
                                 * just check up/down
                                 */

                                link_status = ((acqe->u0.s.link_status &
                                    ~ASYNC_EVENT_LOGICAL) ==
                                    ASYNC_EVENT_LINK_UP) ?
                                    LINK_STATE_UP : LINK_STATE_DOWN;
                                mac_link_update(dev->mac_handle, link_status);
                                dev->link_status = link_status;
                                dev->link_speed = -1;
                        }
                }
                cqe->u0.dw[3] = 0;
                RING_GET(cq->ring, 1);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
                num_cqe++;
        } /* for all valid CQE */
        mutex_exit(&mq->lock);
        oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
        return (num_cqe);
} /* oce_drain_mq_cq */
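
/*
 * oce_start_mq: arm the MQ completion queue for the first time so the
 * hardware can start delivering completions and async events.
 */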
int
oce_start_mq(struct oce_mq *mq)
{
        oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE);
        return (0);
}
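
/*
 * oce_clean_mq: consume and discard any outstanding MQ CQEs without
 * processing them, then drain the associated event queue.
 */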
void
oce_clean_mq(struct oce_mq *mq)
{
        struct oce_cq  *cq;
        struct oce_dev *dev;
        uint16_t num_cqe = 0;
        struct oce_mq_cqe *cqe = NULL;

        cq = mq->cq;
        dev = mq->parent;
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
        while (cqe->u0.dw[3]) {
                DW_SWAP(u32ptr(cqe), sizeof (struct oce_mq_cqe));
                cqe->u0.dw[3] = 0;
                RING_GET(cq->ring, 1);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
                num_cqe++;
        } /* for all valid CQE */
        if (num_cqe)
                oce_arm_cq(dev, cq->cq_id, num_cqe, B_FALSE);
        /* Drain the Event queue now */
        oce_drain_eq(mq->cq->eq);
}

Updated version:

/*
 * CDDL HEADER START
 *
 * The contents of this file are subject to the terms of the
 * Common Development and Distribution License (the "License").
 * You may not use this file except in compliance with the License.
 *
 * You can obtain a copy of the license at usr/src/OPENSOLARIS.LICENSE
 * or http://www.opensolaris.org/os/licensing.
 * See the License for the specific language governing permissions
 * and limitations under the License.
 *
 * When distributing Covered Code, include this CDDL HEADER in each
 * file and include the License file at usr/src/OPENSOLARIS.LICENSE.
 * If applicable, add the following below this CDDL HEADER, with the
 * fields enclosed by brackets "[]" replaced with your own identifying
 * information: Portions Copyright [yyyy] [name of copyright owner]
 *
 * CDDL HEADER END
 */

/*
 * Copyright (c) 2009-2012 Emulex. All rights reserved.
 * Use is subject to license terms.
 */

/*
 * Source file containing the implementation of the MailBox queue handling
 * and related helper functions
 */

#include <oce_impl.h>
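
/*
 * lookup table mapping the async link-event speed code to the link
 * speed in Mbps (a code of 0 yields 0, i.e. no speed reported)
 */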
int pow10[5] = {
        0,
        10,
        100,
        1000,
        10000
};

static void oce_process_link_event(struct oce_dev *dev,
    struct oce_async_cqe_link_state *acqe);
static void oce_async_grp5_qos_speed_process(struct oce_dev *dev,
    struct oce_async_event_grp5_qos_link_speed *acqe);
static void oce_async_grp5_pvid_state(struct oce_dev *dev,
    struct oce_async_event_grp5_pvid_state *acqe);
static void oce_async_grp5_cos_priority(struct oce_dev *dev,
    struct oce_async_event_grp5_cos_priority *acqe);
void oce_process_grp5_event(struct oce_dev *dev,
    struct oce_mq_cqe *cqe, uint8_t event_type);
static void oce_process_mq_compl(struct oce_dev *dev,
    struct oce_mq_cqe *cqe);
static void oce_process_async_events(struct oce_dev *dev,
    struct oce_mq_cqe *cqe);
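
/*
 * handler for debug-class async events; for a QnQ event, record whether
 * QnQ tagging is enabled and cache the (little-endian) VLAN tag.
 */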
static void
oce_process_debug_event(struct oce_dev *dev,
    struct async_event_qnq *acqe, uint8_t event_type)
{
        oce_log(dev, CE_NOTE, MOD_CONFIG,
            "Debug Event: type = %d, enabled = %d, tag = 0x%x",
            acqe->trailer.u0.bits.event_type, acqe->enabled, acqe->vlan_tag);

        if (event_type == ASYNC_DEBUG_EVENT_TYPE_QNQ) {
                dev->QnQ_queried = 1;
                dev->QnQ_valid = acqe->enabled;
                dev->QnQ_tag = LE_16(acqe->vlan_tag);

                if (!dev->QnQ_valid) {
                        dev->QnQ_tag = 0;
                }
        }
}
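
/*
 * handler for link-state async events: record the new link state, speed
 * (in Mbps) and duplex, and notify the MAC layer of the change.
 */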
static void
oce_process_link_event(struct oce_dev *dev,
    struct oce_async_cqe_link_state *acqe)
{
        link_state_t link_status;

        link_status = ((acqe->link_status & ~ASYNC_EVENT_LOGICAL) ==
            ASYNC_EVENT_LINK_UP) ? LINK_STATE_UP : LINK_STATE_DOWN;

        /* store the link status */
        dev->link_status = link_status;

        dev->link_speed = (acqe->qos_link_speed > 0) ?
            LE_16(acqe->qos_link_speed) * 10 : pow10[acqe->speed];
        dev->link_duplex = acqe->duplex;

        mac_link_update(dev->mac_handle, link_status);
        oce_log(dev, CE_NOTE, MOD_CONFIG, "Link Event: "
            "Link Status %d Link Speed %d Link Duplex %d\n",
            dev->link_status, dev->link_speed, dev->link_duplex);
}
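
/*
 * GRP5 QOS link-speed handler: if the event is for this device's
 * physical port, update the cached link speed (reported by the
 * hardware in 10 Mbps units).
 */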
static void
oce_async_grp5_qos_speed_process(struct oce_dev *dev,
    struct oce_async_event_grp5_qos_link_speed *acqe)
{
        if (acqe->physical_port == dev->port_id) {
                dev->link_speed = LE_16(acqe->qos_link_speed) * 10;
        }
        oce_log(dev, CE_NOTE, MOD_CONFIG, "GRP5 QOS_SPEED EVENT: "
            "Physical Port %d QOS_SPEED %d\n", acqe->physical_port,
            acqe->qos_link_speed);
}
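
/*
 * GRP5 PVID-state handler: cache the port VLAN ID when PVID tagging is
 * enabled, clear it otherwise.
 */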
static void
oce_async_grp5_pvid_state(struct oce_dev *dev,
    struct oce_async_event_grp5_pvid_state *acqe)
{
        if (acqe->enabled) {
                dev->pvid = BE_16(acqe->tag);
        } else {
                dev->pvid = 0;
        }
        oce_log(dev, CE_NOTE, MOD_CONFIG, "GRP5 PVID EVENT: "
            "PVID Configured 0x%x\n", dev->pvid);
}
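
/*
 * GRP5 CoS-priority handler: when the event carries valid data, record
 * the available VLAN priority bitmap and recommended default priority.
 */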
static void
oce_async_grp5_cos_priority(struct oce_dev *dev,
    struct oce_async_event_grp5_cos_priority *acqe)
{
        if (acqe->valid) {
                dev->vlan_prio_bmap = acqe->available_priority_bmap;
                dev->reco_priority &= acqe->reco_default_priority;
        }
}
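
/* dispatch a GRP5 async event to its type-specific handler */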
void
oce_process_grp5_event(struct oce_dev *dev,
    struct oce_mq_cqe *cqe, uint8_t event_type)
{
        switch (event_type) {
        case ASYNC_EVENT_QOS_SPEED:
                oce_async_grp5_qos_speed_process(dev,
                    (struct oce_async_event_grp5_qos_link_speed *)cqe);
                break;
        case ASYNC_EVENT_COS_PRIORITY:
                oce_async_grp5_cos_priority(dev,
                    (struct oce_async_event_grp5_cos_priority *)cqe);
                break;
        case ASYNC_EVENT_PVID_STATE:
                oce_async_grp5_pvid_state(dev,
                    (struct oce_async_event_grp5_pvid_state *)cqe);
                break;
        default:
                break;
        }
}

/*
 * function to drain an MCQ and process its CQEs
 *
 * arg - pointer to the MQ whose completion queue is to be drained
 * arg2, arg3 - unused
 *
 * always returns NULL
 */
void *
oce_drain_mq_cq(void *arg, int arg2, int arg3)
{
        struct oce_mq_cqe *cqe = NULL;
        uint16_t num_cqe = 0;
        struct oce_mq *mq;
        struct oce_cq  *cq;
        struct oce_dev *dev;
        uint32_t flags = 0;

        _NOTE(ARGUNUSED(arg2));
        _NOTE(ARGUNUSED(arg3));

        /* process CQEs until we reach one that is not valid */
        mq = (struct oce_mq *)arg;
        cq = mq->cq;
        dev = mq->parent;

        DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORKERNEL);
        cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);

        while (MQ_CQE_VALID(cqe)) {
                flags = LE_32(cqe->u0.dw[3]);

                if (flags & MQ_CQE_ASYNC_MASK) {
                        oce_process_async_events(dev, cqe);
                } else if (flags & MQ_CQE_COMPLETED_MASK) {
                        oce_process_mq_compl(dev, cqe);
                        atomic_add_32(&mq->mq_free, 1);
                }
                MQ_CQE_INVALIDATE(cqe);
                RING_GET(cq->ring, 1);
                cqe = RING_GET_CONSUMER_ITEM_VA(cq->ring, struct oce_mq_cqe);
                num_cqe++;
        } /* for all valid CQE */
        DBUF_SYNC(cq->ring->dbuf, 0, 0, DDI_DMA_SYNC_FORDEV);
        oce_arm_cq(dev, cq->cq_id, num_cqe, B_TRUE);
        return (NULL);
} /* oce_drain_mq_cq */
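
/*
 * oce_start_mq: arm the MQ completion queue for the first time so the
 * hardware can start delivering completions and async events.
 */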
int
oce_start_mq(struct oce_mq *mq)
{
        oce_arm_cq(mq->parent, mq->cq->cq_id, 0, B_TRUE);
        return (0);
}
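
/*
 * oce_clean_mq: repeatedly drain the MQ completion queue until every
 * queue entry has been reclaimed, then drain the associated event queue.
 */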
void
oce_clean_mq(struct oce_mq *mq)
{
        while (mq->mq_free != mq->cfg.q_len) {
                (void) oce_drain_mq_cq(mq, 0, 0);
        }
        /* Drain the Event queue now */
        oce_drain_eq(mq->cq->eq);
}

/* function to issue a mailbox command on the MQ */
int
oce_issue_mq_mbox(struct oce_dev *dev, struct oce_mbx *mbx)
{
        struct oce_mq *mq;
        struct oce_mbx *mqe;
        struct oce_mbx_ctx *mbctx;
        uint32_t mqdb = 0;

        mq = dev->mq;
        mbctx = (struct oce_mbx_ctx *)
            (uintptr_t)ADDR_64(mbx->tag[1], mbx->tag[0]);

        mutex_enter(&mq->lock);

        if (oce_atomic_reserve(&mq->mq_free, 1) < 0) {
                mutex_exit(&mq->lock);
                oce_log(dev, CE_NOTE, MOD_CONFIG,
                    "No MQ entries free (%d), retry the command later",
                    mq->mq_free);
                return (MBX_QUEUE_FULL);
        }

        mqe = RING_GET_PRODUCER_ITEM_VA(mq->ring, struct oce_mbx);
        /* save the mqe pointer in ctx required to copy resp back */
        mbctx->mqe = mqe;
        /* enqueue the command */
        bcopy(mbx, mqe, sizeof (struct oce_mbx));
        RING_PUT(mq->ring, 1);
        /* ring mq doorbell, number posted is 1 */
        mqdb = (1 << 16) | mq->mq_id;
        OCE_DB_WRITE32(dev, PD_MQ_DB, mqdb);
        mutex_exit(&mq->lock);
        return (MBX_SUCCESS);
}
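
/*
 * oce_process_mq_compl: complete a synchronous mailbox command; recover
 * the caller's context from the CQE tag, copy the response back on
 * success, then mark the command completed and wake the waiter.
 */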
void
oce_process_mq_compl(struct oce_dev *dev, struct oce_mq_cqe *cqe)
{
        struct oce_mbx_ctx *mbctx;
        struct oce_mbx *mbx;

        _NOTE(ARGUNUSED(dev));

        /* retrieve the context pointer */
        mbctx = (struct oce_mbx_ctx *)(uintptr_t)ADDR_64(cqe->u0.s.mq_tag[1],
            cqe->u0.s.mq_tag[0]);

        if (mbctx == NULL) {
                return;
        }
        mbx = mbctx->mbx;

        mbctx->compl_status = LE_32(cqe->u0.dw[0]);
        if (mbctx->compl_status == 0) {
                bcopy(mbctx->mqe, mbx, sizeof (struct oce_mbx));
        }
        mutex_enter(&mbctx->cv_lock);
        mbctx->mbx_status = MBX_COMPLETED;
        cv_signal(&mbctx->cond_var);
        mutex_exit(&mbctx->cv_lock);
}
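
/*
 * oce_process_async_events: decode the async-event trailer from the CQE
 * and dispatch to the debug, link-state or GRP5 event handler.
 */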
static void
oce_process_async_events(struct oce_dev *dev, struct oce_mq_cqe *cqe)
{
        struct oce_async_event_trailer trailer;

        trailer.u0.code = LE_32(cqe->u0.dw[3]);

        switch (trailer.u0.bits.event_code) {
        case ASYNC_EVENT_CODE_DEBUG:
                oce_process_debug_event(dev, (struct async_event_qnq *)cqe,
                    trailer.u0.bits.event_type);
                break;
        case ASYNC_EVENT_CODE_LINK_STATE:
                oce_process_link_event(dev,
                    (struct oce_async_cqe_link_state *)cqe);
                break;
        case ASYNC_EVENT_CODE_GRP_5:
                oce_process_grp5_event(dev, cqe, trailer.u0.bits.event_type);
                break;
        default:
                break;
        }
}