MFV: illumos-gate@bbb9d5d65bf8372aae4b8821c80e218b8b832846
9994 cxgbe t4nex: Handle get_fl_payload() alloc failures
9995 cxgbe t4_devo_attach() should initialize ->sfl
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: John Levon <john.levon@joyent.com>
9484 cxgbe should clean TX descriptors in timely manner
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Ryan Zezeski <rpz@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
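
Two of these fixes land as one-line additions in this header: 9994 adds an
allocb_fail counter to struct sge_fl, and 9484 adds a per-channel taskq array
to struct adapter. Bug 9995 concerns state already declared here: struct
adapter ends with a TAILQ_HEAD(, sge_fl) sfl of starving free lists, which
t4_devo_attach() must initialize before the rx path can ever enqueue a
starving free list on it. A minimal sketch of the kind of initialization
involved follows; the attach-side code is not part of this header diff, so
the placement and surrounding calls are assumptions, not the actual patch.

        /*
         * Hypothetical excerpt from t4_devo_attach() (9995): set up the
         * starving free-list tailq and its lock before rx can run.
         */
        mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
        TAILQ_INIT(&sc->sfl);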
    
      
    
    
          --- old/usr/src/uts/common/io/cxgbe/t4nex/adapter.h
          +++ new/usr/src/uts/common/io/cxgbe/t4nex/adapter.h
   1    1  /*
   2    2   * This file and its contents are supplied under the terms of the
   3    3   * Common Development and Distribution License ("CDDL"), version 1.0.
   4    4   * You may only use this file in accordance with the terms of version
   5    5   * 1.0 of the CDDL.
   6    6   *
   7    7   * A full copy of the text of the CDDL should have accompanied this
   8    8   * source. A copy of the CDDL is also available via the Internet at
   9    9   * http://www.illumos.org/license/CDDL.
  10   10   */
  11   11  
  12   12  /*
  13   13   * This file is part of the Chelsio T4 support code.
  14   14   *
  15   15   * Copyright (C) 2011-2013 Chelsio Communications.  All rights reserved.
  16   16   *
  17   17   * This program is distributed in the hope that it will be useful, but WITHOUT
  18   18   * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
  19   19   * FITNESS FOR A PARTICULAR PURPOSE.  See the LICENSE file included in this
  20   20   * release for licensing terms and conditions.
  21   21   */
  22   22  
  23   23  #ifndef __CXGBE_ADAPTER_H
  24   24  #define __CXGBE_ADAPTER_H
  25   25  
  26   26  #include <sys/ddi.h>
  27   27  #include <sys/mac_provider.h>
  28   28  #include <sys/ethernet.h>
  29   29  #include <sys/queue.h>
  30   30  
  31   31  #include "offload.h"
  32   32  #include "firmware/t4fw_interface.h"
  33   33  #include "shared.h"
  34   34  
  35   35  struct adapter;
  36   36  typedef struct adapter adapter_t;
  37   37  
  38   38  enum {
  39   39          FW_IQ_QSIZE = 256,
  40   40          FW_IQ_ESIZE = 64,       /* At least 64 mandated by the firmware spec */
  41   41  
  42   42          RX_IQ_QSIZE = 1024,
  43   43          RX_IQ_ESIZE = 64,       /* At least 64 so CPL_RX_PKT will fit */
  44   44  
  45   45          EQ_ESIZE = 64,          /* All egress queues use this entry size */
  46   46  
  47   47          RX_FL_ESIZE = 64,       /* 8 64bit addresses */
  48   48  
  49   49          FL_BUF_SIZES = 4,
  50   50  
  51   51          CTRL_EQ_QSIZE = 128,
  52   52  
  53   53          TX_EQ_QSIZE = 1024,
  54   54          TX_SGL_SEGS = 36,
  55   55          TX_WR_FLITS = SGE_MAX_WR_LEN / 8
  56   56  };
  57   57  
  58   58  enum {
  59   59          /* adapter flags */
  60   60          FULL_INIT_DONE  = (1 << 0),
  61   61          FW_OK           = (1 << 1),
  62   62          INTR_FWD        = (1 << 2),
  63   63          INTR_ALLOCATED  = (1 << 3),
  64   64          MASTER_PF       = (1 << 4),
  65   65  
  66   66          CXGBE_BUSY      = (1 << 9),
  67   67  
  68   68          /* port flags */
  69   69          DOOMED          = (1 << 0),
  70   70          PORT_INIT_DONE  = (1 << 1),
  71   71  };
  72   72  
  73   73  enum {
  74   74          /* Features */
  75   75          CXGBE_HW_LSO    = (1 << 0),
  76   76          CXGBE_HW_CSUM   = (1 << 1),
  77   77  };
  78   78  
  79   79  enum {
  80   80          UDBS_SEG_SHIFT  = 7,    /* log2(UDBS_SEG_SIZE) */
  81   81          UDBS_DB_OFFSET  = 8,    /* offset of the 4B doorbell in a segment */
  82   82          UDBS_WR_OFFSET  = 64,   /* offset of the work request in a segment */
  83   83  };
  84   84  
  85   85  #define IS_DOOMED(pi)   (pi->flags & DOOMED)
  86   86  #define SET_DOOMED(pi)  do { pi->flags |= DOOMED; } while (0)
  87   87  #define IS_BUSY(sc)     (sc->flags & CXGBE_BUSY)
  88   88  #define SET_BUSY(sc)    do { sc->flags |= CXGBE_BUSY; } while (0)
  89   89  #define CLR_BUSY(sc)    do { sc->flags &= ~CXGBE_BUSY; } while (0)
  90   90  
  91   91  struct port_info {
  92   92          PORT_INFO_HDR;
  93   93  
  94   94          kmutex_t lock;
  95   95          struct adapter *adapter;
  96   96  
  97   97  #ifdef TCP_OFFLOAD_ENABLE
  98   98          void *tdev;
  99   99  #endif
 100  100  
 101  101          unsigned int flags;
 102  102  
 103  103          uint16_t viid;
 104  104          int16_t  xact_addr_filt; /* index of exact MAC address filter */
 105  105          uint16_t rss_size;      /* size of VI's RSS table slice */
 106  106          uint16_t ntxq;          /* # of tx queues */
 107  107          uint16_t first_txq;     /* index of first tx queue */
 108  108          uint16_t nrxq;          /* # of rx queues */
 109  109          uint16_t first_rxq;     /* index of first rx queue */
 110  110  #ifdef TCP_OFFLOAD_ENABLE
 111  111          uint16_t nofldtxq;              /* # of offload tx queues */
 112  112          uint16_t first_ofld_txq;        /* index of first offload tx queue */
 113  113          uint16_t nofldrxq;              /* # of offload rx queues */
 114  114          uint16_t first_ofld_rxq;        /* index of first offload rx queue */
 115  115  #endif
 116  116          uint8_t  lport;         /* associated offload logical port */
 117  117          int8_t   mdio_addr;
 118  118          uint8_t  port_type;
 119  119          uint8_t  mod_type;
 120  120          uint8_t  port_id;
 121  121          uint8_t  tx_chan;
 122  122          uint8_t  rx_chan;
 123  123          uint8_t instance; /* Associated adapter instance */
 124  124          uint8_t child_inst; /* Associated child instance */
 125  125          uint8_t tmr_idx;
 126  126          int8_t  pktc_idx;
 127  127          struct link_config link_cfg;
 128  128          struct port_stats stats;
 129  129          uint32_t features;
 130  130          uint8_t macaddr_cnt;
 131  131          u8 rss_mode;
 132  132          u16 viid_mirror;
 133  133          kstat_t *ksp_config;
 134  134          kstat_t *ksp_info;
 135  135  };
 136  136  
 137  137  struct fl_sdesc {
 138  138          struct rxbuf *rxb;
 139  139  };
 140  140  
 141  141  struct tx_desc {
 142  142          __be64 flit[8];
 143  143  };
 144  144  
 145  145  /* DMA maps used for tx */
 146  146  struct tx_maps {
 147  147          ddi_dma_handle_t *map;
 148  148          uint32_t map_total;     /* # of DMA maps */
 149  149          uint32_t map_pidx;      /* next map to be used */
 150  150          uint32_t map_cidx;      /* reclaimed up to this index */
 151  151          uint32_t map_avail;     /* # of available maps */
 152  152  };
 153  153  
 154  154  struct tx_sdesc {
 155  155          mblk_t *m;
 156  156          uint32_t txb_used;      /* # of bytes of tx copy buffer used */
 157  157          uint16_t hdls_used;     /* # of dma handles used */
 158  158          uint16_t desc_used;     /* # of hardware descriptors used */
 159  159  };
 160  160  
 161  161  enum {
 162  162          /* iq flags */
 163  163          IQ_ALLOCATED    = (1 << 0),     /* firmware resources allocated */
 164  164          IQ_INTR         = (1 << 1),     /* iq takes direct interrupt */
 165  165          IQ_HAS_FL       = (1 << 2),     /* iq has fl */
 166  166  
 167  167          /* iq state */
 168  168          IQS_DISABLED    = 0,
 169  169          IQS_BUSY        = 1,
 170  170          IQS_IDLE        = 2,
 171  171  };
 172  172  
 173  173  /*
 174  174   * Ingress Queue: T4 is producer, driver is consumer.
 175  175   */
 176  176  struct sge_iq {
 177  177          unsigned int flags;
 178  178          ddi_dma_handle_t dhdl;
 179  179          ddi_acc_handle_t ahdl;
 180  180  
 181  181          volatile uint_t state;
 182  182          __be64 *desc;           /* KVA of descriptor ring */
 183  183          uint64_t ba;            /* bus address of descriptor ring */
 184  184          const __be64 *cdesc;    /* current descriptor */
 185  185          struct adapter *adapter; /* associated adapter */
 186  186          uint8_t  gen;           /* generation bit */
 187  187          uint8_t  intr_params;   /* interrupt holdoff parameters */
 188  188          int8_t   intr_pktc_idx; /* packet count threshold index */
 189  189          uint8_t  intr_next;     /* holdoff for next interrupt */
 190  190          uint8_t  esize;         /* size (bytes) of each entry in the queue */
 191  191          uint16_t qsize;         /* size (# of entries) of the queue */
 192  192          uint16_t cidx;          /* consumer index */
 193  193          uint16_t pending;       /* # of descs processed since last doorbell */
 194  194          uint16_t cntxt_id;      /* SGE context id for the iq */
 195  195          uint16_t abs_id;        /* absolute SGE id for the iq */
 196  196          kmutex_t lock;          /* Rx access lock */
 197  197          uint8_t polling;
 198  198  
 199  199          STAILQ_ENTRY(sge_iq) link;
 200  200  };
 201  201  
 202  202  enum {
 203  203          EQ_CTRL         = 1,
 204  204          EQ_ETH          = 2,
 205  205  #ifdef TCP_OFFLOAD_ENABLE
 206  206          EQ_OFLD         = 3,
 207  207  #endif
 208  208  
 209  209          /* eq flags */
 210  210          EQ_TYPEMASK     = 7,            /* 3 lsbits hold the type */
 211  211          EQ_ALLOCATED    = (1 << 3),     /* firmware resources allocated */
 212  212          EQ_DOOMED       = (1 << 4),     /* about to be destroyed */
 213  213          EQ_CRFLUSHED    = (1 << 5),     /* expecting an update from SGE */
 214  214          EQ_STALLED      = (1 << 6),     /* out of hw descriptors or dmamaps */
 215  215          EQ_MTX          = (1 << 7),     /* mutex has been initialized */
 216  216          EQ_STARTED      = (1 << 8),     /* started */
 217  217  };
 218  218  
 219  219  /* Listed in order of preference.  Update t4_sysctls too if you change these */
 220  220  enum {DOORBELL_UDB=0x1, DOORBELL_WCWR=0x2, DOORBELL_UDBWC=0x4, DOORBELL_KDB=0x8};
 221  221  
 222  222  /*
 223  223   * Egress Queue: driver is producer, T4 is consumer.
 224  224   *
 225  225   * Note: A free list is an egress queue (driver produces the buffers and T4
 226  226   * consumes them) but it's special enough to have its own struct (see sge_fl).
 227  227   */
 228  228  struct sge_eq {
 229  229          ddi_dma_handle_t desc_dhdl;
 230  230          ddi_acc_handle_t desc_ahdl;
 231  231          unsigned int flags;
 232  232          kmutex_t lock;
 233  233  
 234  234          struct tx_desc *desc;   /* KVA of descriptor ring */
 235  235          uint64_t ba;            /* bus address of descriptor ring */
 236  236          struct sge_qstat *spg;  /* status page, for convenience */
 237  237          int doorbells;
 238  238          volatile uint32_t *udb; /* KVA of doorbell (lies within BAR2) */
 239  239          u_int udb_qid;          /* relative qid within the doorbell page */
 240  240          uint16_t cap;           /* max # of desc, for convenience */
 241  241          uint16_t avail;         /* available descriptors, for convenience */
 242  242          uint16_t qsize;         /* size (# of entries) of the queue */
 243  243          uint16_t cidx;          /* consumer idx (desc idx) */
 244  244          uint16_t pidx;          /* producer idx (desc idx) */
 245  245          uint16_t pending;       /* # of descriptors used since last doorbell */
 246  246          uint16_t iqid;          /* iq that gets egr_update for the eq */
 247  247          uint8_t tx_chan;        /* tx channel used by the eq */
 248  248          uint32_t cntxt_id;      /* SGE context id for the eq */
 249  249  };
 250  250  
 251  251  enum {
 252  252          /* fl flags */
 253  253          FL_MTX          = (1 << 0),     /* mutex has been initialized */
 254  254          FL_STARVING     = (1 << 1),     /* on the list of starving fl's */
 255  255          FL_DOOMED       = (1 << 2),     /* about to be destroyed */
 256  256  };
 257  257  
 258  258  #define FL_RUNNING_LOW(fl)      (fl->cap - fl->needed <= fl->lowat)
 259  259  #define FL_NOT_RUNNING_LOW(fl)  (fl->cap - fl->needed >= 2 * fl->lowat)
 260  260  
 261  261  struct sge_fl {
 262  262          unsigned int flags;
 263  263          kmutex_t lock;
 264  264          ddi_dma_handle_t dhdl;
 265  265          ddi_acc_handle_t ahdl;
 266  266  
 267  267          __be64 *desc;           /* KVA of descriptor ring, ptr to addresses */
 268  268          uint64_t ba;            /* bus address of descriptor ring */
 269  269          struct fl_sdesc *sdesc; /* KVA of software descriptor ring */
 270  270          uint32_t cap;           /* max # of buffers, for convenience */
 271  271          uint16_t qsize;         /* size (# of entries) of the queue */
 272  272          uint16_t cntxt_id;      /* SGE context id for the freelist */
  
  
 273  273          uint32_t cidx;          /* consumer idx (buffer idx, NOT hw desc idx) */
 274  274          uint32_t pidx;          /* producer idx (buffer idx, NOT hw desc idx) */
 275  275          uint32_t needed;        /* # of buffers needed to fill up fl. */
 276  276          uint32_t lowat;         /* # of buffers <= this means fl needs help */
 277  277          uint32_t pending;       /* # of bufs allocated since last doorbell */
 278  278          uint32_t offset;        /* current packet within the larger buffer */
 279  279          uint16_t copy_threshold; /* anything this size or less is copied up */
 280  280  
 281  281          uint64_t copied_up;     /* # of frames copied into mblk and handed up */
 282  282          uint64_t passed_up;     /* # of frames wrapped in mblk and handed up */
      283 +        uint64_t allocb_fail;   /* # of mblk allocation failures */
 283  284  
 284  285          TAILQ_ENTRY(sge_fl) link; /* All starving freelists */
 285  286  };
 286  287  
 287  288  /* txq: SGE egress queue + miscellaneous items */
 288  289  struct sge_txq {
 289  290          struct sge_eq eq;       /* MUST be first */
 290  291  
 291  292          struct port_info *port; /* the port this txq belongs to */
 292  293          struct tx_sdesc *sdesc; /* KVA of software descriptor ring */
 293  294          mac_ring_handle_t ring_handle;
 294  295  
 295  296          /* DMA handles used for tx */
 296  297          ddi_dma_handle_t *tx_dhdl;
 297  298          uint32_t tx_dhdl_total; /* Total # of handles */
 298  299          uint32_t tx_dhdl_pidx;  /* next handle to be used */
 299  300          uint32_t tx_dhdl_cidx;  /* reclaimed up to this index */
 300  301          uint32_t tx_dhdl_avail; /* # of available handles */
 301  302  
 302  303          /* Copy buffers for tx */
 303  304          ddi_dma_handle_t txb_dhdl;
 304  305          ddi_acc_handle_t txb_ahdl;
 305  306          caddr_t txb_va;         /* KVA of copy buffers area */
 306  307          uint64_t txb_ba;        /* bus address of copy buffers area */
 307  308          uint32_t txb_size;      /* total size */
 308  309          uint32_t txb_next;      /* offset of next useable area in the buffer */
 309  310          uint32_t txb_avail;     /* # of bytes available */
 310  311          uint16_t copy_threshold; /* anything this size or less is copied up */
 311  312  
 312  313          uint64_t txpkts;        /* # of ethernet packets */
 313  314          uint64_t txbytes;       /* # of ethernet bytes */
 314  315          kstat_t *ksp;
 315  316  
 316  317          /* stats for common events first */
 317  318  
 318  319          uint64_t txcsum;        /* # of times hardware assisted with checksum */
 319  320          uint64_t tso_wrs;       /* # of IPv4 TSO work requests */
 320  321          uint64_t imm_wrs;       /* # of work requests with immediate data */
 321  322          uint64_t sgl_wrs;       /* # of work requests with direct SGL */
 322  323          uint64_t txpkt_wrs;     /* # of txpkt work requests (not coalesced) */
 323  324          uint64_t txpkts_wrs;    /* # of coalesced tx work requests */
 324  325          uint64_t txpkts_pkts;   /* # of frames in coalesced tx work requests */
 325  326          uint64_t txb_used;      /* # of tx copy buffers used (64 byte each) */
 326  327          uint64_t hdl_used;      /* # of DMA handles used */
 327  328  
 328  329          /* stats for not-that-common events */
 329  330  
 330  331          uint32_t txb_full;      /* txb ran out of space */
 331  332          uint32_t dma_hdl_failed; /* couldn't obtain DMA handle */
 332  333          uint32_t dma_map_failed; /* couldn't obtain DMA mapping */
 333  334          uint32_t qfull;         /* out of hardware descriptors */
 334  335          uint32_t qflush;        /* # of SGE_EGR_UPDATE notifications for txq */
 335  336          uint32_t pullup_early;  /* # of pullups before starting frame's SGL */
 336  337          uint32_t pullup_late;   /* # of pullups while building frame's SGL */
 337  338          uint32_t pullup_failed; /* # of failed pullups */
 338  339  };
 339  340  
 340  341  /* rxq: SGE ingress queue + SGE free list + miscellaneous items */
 341  342  struct sge_rxq {
 342  343          struct sge_iq iq;       /* MUST be first */
 343  344          struct sge_fl fl;
 344  345  
 345  346          struct port_info *port; /* the port this rxq belongs to */
 346  347          kstat_t *ksp;
 347  348  
 348  349          mac_ring_handle_t ring_handle;
 349  350          uint64_t ring_gen_num;
 350  351  
 351  352          /* stats for common events first */
 352  353  
 353  354          uint64_t rxcsum;        /* # of times hardware assisted with checksum */
 354  355          uint64_t rxpkts;        /* # of ethernet packets */
 355  356          uint64_t rxbytes;       /* # of ethernet bytes */
 356  357  
 357  358          /* stats for not-that-common events */
 358  359  
 359  360          uint32_t nomem;         /* mblk allocation during rx failed */
 360  361  };
 361  362  
 362  363  #ifdef TCP_OFFLOAD_ENABLE
 363  364  /* ofld_rxq: SGE ingress queue + SGE free list + miscellaneous items */
 364  365  struct sge_ofld_rxq {
 365  366          struct sge_iq iq;       /* MUST be first */
 366  367          struct sge_fl fl;
 367  368  };
 368  369  
 369  370  /*
 370  371   * wrq: SGE egress queue that is given prebuilt work requests.  Both the control
 371  372   * and offload tx queues are of this type.
 372  373   */
 373  374  struct sge_wrq {
 374  375          struct sge_eq eq;       /* MUST be first */
 375  376  
 376  377          struct adapter *adapter;
 377  378  
 378  379          /* List of WRs held up due to lack of tx descriptors */
 379  380          struct mblk_pair wr_list;
 380  381  
 381  382          /* stats for common events first */
 382  383  
 383  384          uint64_t tx_wrs;        /* # of tx work requests */
 384  385  
 385  386          /* stats for not-that-common events */
 386  387  
 387  388          uint32_t no_desc;       /* out of hardware descriptors */
 388  389  };
 389  390  #endif
 390  391  
 391  392  struct sge {
 392  393          int fl_starve_threshold;
 393  394          int s_qpp;
 394  395  
 395  396          int nrxq;       /* total rx queues (all ports and the rest) */
 396  397          int ntxq;       /* total tx queues (all ports and the rest) */
 397  398  #ifdef TCP_OFFLOAD_ENABLE
 398  399          int nofldrxq;   /* total # of TOE rx queues */
 399  400          int nofldtxq;   /* total # of TOE tx queues */
 400  401  #endif
 401  402          int niq;        /* total ingress queues */
 402  403          int neq;        /* total egress queues */
 403  404          int stat_len;   /* length of status page at ring end */
 404  405          int pktshift;   /* padding between CPL & packet data */
 405  406          int fl_align;   /* response queue message alignment */
 406  407  
 407  408          struct sge_iq fwq;      /* Firmware event queue */
 408  409  #ifdef TCP_OFFLOAD_ENABLE
 409  410          struct sge_wrq mgmtq;   /* Management queue (Control queue) */
 410  411  #endif
 411  412          struct sge_txq *txq;    /* NIC tx queues */
 412  413          struct sge_rxq *rxq;    /* NIC rx queues */
 413  414  #ifdef TCP_OFFLOAD_ENABLE
 414  415          struct sge_wrq *ctrlq;  /* Control queues */
 415  416          struct sge_wrq *ofld_txq;       /* TOE tx queues */
 416  417          struct sge_ofld_rxq *ofld_rxq;  /* TOE rx queues */
 417  418  #endif
 418  419  
 419  420          uint16_t iq_start;
 420  421          int eq_start;
 421  422          struct sge_iq **iqmap;  /* iq->cntxt_id to iq mapping */
 422  423          struct sge_eq **eqmap;  /* eq->cntxt_id to eq mapping */
 423  424  
 424  425          /* Device access and DMA attributes for all the descriptor rings */
 425  426          ddi_device_acc_attr_t acc_attr_desc;
 426  427          ddi_dma_attr_t  dma_attr_desc;
 427  428  
 428  429          /* Device access and DMA attributes for tx buffers */
 429  430          ddi_device_acc_attr_t acc_attr_tx;
 430  431          ddi_dma_attr_t  dma_attr_tx;
 431  432  
 432  433          /* Device access and DMA attributes for rx buffers are in rxb_params */
 433  434          kmem_cache_t *rxbuf_cache;
 434  435          struct rxbuf_cache_params rxb_params;
 435  436  };
 436  437  
 437  438  struct driver_properties {
 438  439          /* There is a driver.conf variable for each of these */
 439  440          int max_ntxq_10g;
 440  441          int max_nrxq_10g;
 441  442          int max_ntxq_1g;
 442  443          int max_nrxq_1g;
 443  444  #ifdef TCP_OFFLOAD_ENABLE
 444  445          int max_nofldtxq_10g;
 445  446          int max_nofldrxq_10g;
 446  447          int max_nofldtxq_1g;
 447  448          int max_nofldrxq_1g;
 448  449  #endif
 449  450          int intr_types;
 450  451          int tmr_idx_10g;
 451  452          int pktc_idx_10g;
 452  453          int tmr_idx_1g;
 453  454          int pktc_idx_1g;
 454  455          int qsize_txq;
 455  456          int qsize_rxq;
 456  457  
 457  458          int timer_val[SGE_NTIMERS];
 458  459          int counter_val[SGE_NCOUNTERS];
 459  460  
 460  461          int wc;
 461  462  
 462  463          int multi_rings;
 463  464          int t4_fw_install;
 464  465  };
 465  466  
 466  467  struct rss_header;
 467  468  typedef int (*cpl_handler_t)(struct sge_iq *, const struct rss_header *,
 468  469      mblk_t *);
 469  470  typedef int (*fw_msg_handler_t)(struct adapter *, const __be64 *);
 470  471  
 471  472  struct adapter {
 472  473          SLIST_ENTRY(adapter) link;
 473  474          dev_info_t *dip;
 474  475          dev_t dev;
 475  476  
 476  477          unsigned int pf;
 477  478          unsigned int mbox;
 478  479  
 479  480          unsigned int vpd_busy;
 480  481          unsigned int vpd_flag;
 481  482  
 482  483          u32 t4_bar0;
 483  484  
 484  485          uint_t open;    /* character device is open */
 485  486  
 486  487          /* PCI config space access handle */
 487  488          ddi_acc_handle_t pci_regh;
 488  489  
 489  490          /* MMIO register access handle */
 490  491          ddi_acc_handle_t regh;
 491  492          caddr_t regp;
 492  493          /* BAR1 register access handle */
 493  494          ddi_acc_handle_t reg1h;
 494  495          caddr_t reg1p;
 495  496  
 496  497          /* Interrupt information */
 497  498          int intr_type;
 498  499          int intr_count;
 499  500          int intr_cap;
  
  
 500  501          uint_t intr_pri;
 501  502          ddi_intr_handle_t *intr_handle;
 502  503  
 503  504          struct driver_properties props;
 504  505          kstat_t *ksp;
 505  506          kstat_t *ksp_stat;
 506  507  
 507  508          struct sge sge;
 508  509  
 509  510          struct port_info *port[MAX_NPORTS];
      511 +        ddi_taskq_t *tq[NCHAN];
 510  512          uint8_t chan_map[NCHAN];
 511  513          uint32_t filter_mode;
 512  514  
 513  515          struct l2t_data *l2t;   /* L2 table */
 514  516          struct tid_info tids;
 515  517  
 516  518          int doorbells;
 517  519          int registered_device_map;
 518  520          int open_device_map;
 519  521          int flags;
 520  522  
 521  523          unsigned int cfcsum;
 522  524          struct adapter_params params;
 523  525          struct t4_virt_res vres;
 524  526  
 525  527  #ifdef TCP_OFFLOAD_ENABLE
 526  528          struct uld_softc tom;
 527  529          struct tom_tunables tt;
 528  530  #endif
 529  531  
 530  532  #ifdef TCP_OFFLOAD_ENABLE
 531  533          int offload_map;
 532  534  #endif
 533  535          uint16_t linkcaps;
 534  536          uint16_t niccaps;
 535  537          uint16_t toecaps;
 536  538          uint16_t rdmacaps;
 537  539          uint16_t iscsicaps;
 538  540          uint16_t fcoecaps;
 539  541  
 540  542          fw_msg_handler_t fw_msg_handler[5]; /* NUM_FW6_TYPES */
 541  543          cpl_handler_t cpl_handler[0xef]; /* NUM_CPL_CMDS */
 542  544  
 543  545          kmutex_t lock;
 544  546          kcondvar_t cv;
 545  547  
 546  548          /* Starving free lists */
 547  549          kmutex_t sfl_lock;      /* same cache-line as sc_lock? but that's ok */
 548  550          TAILQ_HEAD(, sge_fl) sfl;
 549  551          timeout_id_t sfl_timer;
 550  552  };
 551  553  
 552  554  enum {
 553  555          NIC_H = 0,
 554  556          TOM_H,
 555  557          IW_H,
 556  558          ISCSI_H
 557  559  };
 558  560  
 559  561  struct memwin {
 560  562          uint32_t base;
 561  563          uint32_t aperture;
 562  564  };
 563  565  
 564  566  #define ADAPTER_LOCK(sc)                mutex_enter(&(sc)->lock)
 565  567  #define ADAPTER_UNLOCK(sc)              mutex_exit(&(sc)->lock)
 566  568  #define ADAPTER_LOCK_ASSERT_OWNED(sc)   ASSERT(mutex_owned(&(sc)->lock))
 567  569  #define ADAPTER_LOCK_ASSERT_NOTOWNED(sc) ASSERT(!mutex_owned(&(sc)->lock))
 568  570  
 569  571  #define PORT_LOCK(pi)                   mutex_enter(&(pi)->lock)
 570  572  #define PORT_UNLOCK(pi)                 mutex_exit(&(pi)->lock)
 571  573  #define PORT_LOCK_ASSERT_OWNED(pi)      ASSERT(mutex_owned(&(pi)->lock))
 572  574  #define PORT_LOCK_ASSERT_NOTOWNED(pi)   ASSERT(!mutex_owned(&(pi)->lock))
 573  575  
 574  576  #define IQ_LOCK(iq)                     mutex_enter(&(iq)->lock)
 575  577  #define IQ_UNLOCK(iq)                   mutex_exit(&(iq)->lock)
 576  578  #define IQ_LOCK_ASSERT_OWNED(iq)        ASSERT(mutex_owned(&(iq)->lock))
 577  579  #define IQ_LOCK_ASSERT_NOTOWNED(iq)     ASSERT(!mutex_owned(&(iq)->lock))
 578  580  
 579  581  #define FL_LOCK(fl)                     mutex_enter(&(fl)->lock)
 580  582  #define FL_UNLOCK(fl)                   mutex_exit(&(fl)->lock)
 581  583  #define FL_LOCK_ASSERT_OWNED(fl)        ASSERT(mutex_owned(&(fl)->lock))
 582  584  #define FL_LOCK_ASSERT_NOTOWNED(fl)     ASSERT(!mutex_owned(&(fl)->lock))
 583  585  
 584  586  #define RXQ_LOCK(rxq)                   IQ_LOCK(&(rxq)->iq)
 585  587  #define RXQ_UNLOCK(rxq)                 IQ_UNLOCK(&(rxq)->iq)
 586  588  #define RXQ_LOCK_ASSERT_OWNED(rxq)      IQ_LOCK_ASSERT_OWNED(&(rxq)->iq)
 587  589  #define RXQ_LOCK_ASSERT_NOTOWNED(rxq)   IQ_LOCK_ASSERT_NOTOWNED(&(rxq)->iq)
 588  590  
 589  591  #define RXQ_FL_LOCK(rxq)                FL_LOCK(&(rxq)->fl)
 590  592  #define RXQ_FL_UNLOCK(rxq)              FL_UNLOCK(&(rxq)->fl)
 591  593  #define RXQ_FL_LOCK_ASSERT_OWNED(rxq)   FL_LOCK_ASSERT_OWNED(&(rxq)->fl)
 592  594  #define RXQ_FL_LOCK_ASSERT_NOTOWNED(rxq) FL_LOCK_ASSERT_NOTOWNED(&(rxq)->fl)
 593  595  
 594  596  #define EQ_LOCK(eq)                     mutex_enter(&(eq)->lock)
 595  597  #define EQ_UNLOCK(eq)                   mutex_exit(&(eq)->lock)
 596  598  #define EQ_LOCK_ASSERT_OWNED(eq)        ASSERT(mutex_owned(&(eq)->lock))
 597  599  #define EQ_LOCK_ASSERT_NOTOWNED(eq)     ASSERT(!mutex_owned(&(eq)->lock))
 598  600  
 599  601  #define TXQ_LOCK(txq)                   EQ_LOCK(&(txq)->eq)
 600  602  #define TXQ_UNLOCK(txq)                 EQ_UNLOCK(&(txq)->eq)
 601  603  #define TXQ_LOCK_ASSERT_OWNED(txq)      EQ_LOCK_ASSERT_OWNED(&(txq)->eq)
 602  604  #define TXQ_LOCK_ASSERT_NOTOWNED(txq)   EQ_LOCK_ASSERT_NOTOWNED(&(txq)->eq)
 603  605  
 604  606  #define for_each_txq(pi, iter, txq) \
 605  607          txq = &pi->adapter->sge.txq[pi->first_txq]; \
 606  608          for (iter = 0; iter < pi->ntxq; ++iter, ++txq)
 607  609  #define for_each_rxq(pi, iter, rxq) \
 608  610          rxq = &pi->adapter->sge.rxq[pi->first_rxq]; \
 609  611          for (iter = 0; iter < pi->nrxq; ++iter, ++rxq)
 610  612  #define for_each_ofld_txq(pi, iter, ofld_txq) \
 611  613          ofld_txq = &pi->adapter->sge.ofld_txq[pi->first_ofld_txq]; \
 612  614          for (iter = 0; iter < pi->nofldtxq; ++iter, ++ofld_txq)
 613  615  #define for_each_ofld_rxq(pi, iter, ofld_rxq) \
 614  616          ofld_rxq = &pi->adapter->sge.ofld_rxq[pi->first_ofld_rxq]; \
 615  617          for (iter = 0; iter < pi->nofldrxq; ++iter, ++ofld_rxq)
 616  618  
 617  619  #define NFIQ(sc) ((sc)->intr_count > 1 ? (sc)->intr_count - 1 : 1)
 618  620  
 619  621  /* One for errors, one for firmware events */
 620  622  #define T4_EXTRA_INTR 2
 621  623  
 622  624  /* Presently disabling locking around mbox access.
 623  625   * We may need to re-enable it later.
 624  626   */
 625  627  typedef int t4_os_lock_t;
 626  628  static inline void t4_os_lock(t4_os_lock_t *lock)
 627  629  {
 628  630  
 629  631  }
 630  632  static inline void t4_os_unlock(t4_os_lock_t *lock)
 631  633  {
 632  634  
 633  635  }
 634  636  
 635  637  static inline uint32_t
 636  638  t4_read_reg(struct adapter *sc, uint32_t reg)
 637  639  {
 638  640          /* LINTED: E_BAD_PTR_CAST_ALIGN */
 639  641          return (ddi_get32(sc->regh, (uint32_t *)(sc->regp + reg)));
 640  642  }
 641  643  
 642  644  static inline void
 643  645  t4_write_reg(struct adapter *sc, uint32_t reg, uint32_t val)
 644  646  {
 645  647          /* LINTED: E_BAD_PTR_CAST_ALIGN */
 646  648          ddi_put32(sc->regh, (uint32_t *)(sc->regp + reg), val);
 647  649  }
 648  650  
 649  651  static inline void
 650  652  t4_os_pci_read_cfg1(struct adapter *sc, int reg, uint8_t *val)
 651  653  {
 652  654          *val = pci_config_get8(sc->pci_regh, reg);
 653  655  }
 654  656  
 655  657  static inline void
 656  658  t4_os_pci_write_cfg1(struct adapter *sc, int reg, uint8_t val)
 657  659  {
 658  660          pci_config_put8(sc->pci_regh, reg, val);
 659  661  }
 660  662  
 661  663  static inline void
 662  664  t4_os_pci_read_cfg2(struct adapter *sc, int reg, uint16_t *val)
 663  665  {
 664  666          *val = pci_config_get16(sc->pci_regh, reg);
 665  667  }
 666  668  
 667  669  static inline void
 668  670  t4_os_pci_write_cfg2(struct adapter *sc, int reg, uint16_t val)
 669  671  {
 670  672          pci_config_put16(sc->pci_regh, reg, val);
 671  673  }
 672  674  
 673  675  static inline void
 674  676  t4_os_pci_read_cfg4(struct adapter *sc, int reg, uint32_t *val)
 675  677  {
 676  678          *val = pci_config_get32(sc->pci_regh, reg);
 677  679  }
 678  680  
 679  681  static inline void
 680  682  t4_os_pci_write_cfg4(struct adapter *sc, int reg, uint32_t val)
 681  683  {
 682  684          pci_config_put32(sc->pci_regh, reg, val);
 683  685  }
 684  686  
 685  687  static inline uint64_t
 686  688  t4_read_reg64(struct adapter *sc, uint32_t reg)
 687  689  {
 688  690          /* LINTED: E_BAD_PTR_CAST_ALIGN */
 689  691          return (ddi_get64(sc->regh, (uint64_t *)(sc->regp + reg)));
 690  692  }
 691  693  
 692  694  static inline void
 693  695  t4_write_reg64(struct adapter *sc, uint32_t reg, uint64_t val)
 694  696  {
 695  697          /* LINTED: E_BAD_PTR_CAST_ALIGN */
 696  698          ddi_put64(sc->regh, (uint64_t *)(sc->regp + reg), val);
 697  699  }
 698  700  
 699  701  static inline struct port_info *
 700  702  adap2pinfo(struct adapter *sc, int idx)
 701  703  {
 702  704          return (sc->port[idx]);
 703  705  }
 704  706  
 705  707  static inline void
 706  708  t4_os_set_hw_addr(struct adapter *sc, int idx, uint8_t hw_addr[])
 707  709  {
 708  710          bcopy(hw_addr, sc->port[idx]->hw_addr, ETHERADDRL);
 709  711  }
 710  712  
 711  713  static inline bool
 712  714  is_10G_port(const struct port_info *pi)
 713  715  {
 714  716          return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_10G) != 0);
 715  717  }
 716  718  
 717  719  static inline struct sge_rxq *
 718  720  iq_to_rxq(struct sge_iq *iq)
 719  721  {
 720  722          return (container_of(iq, struct sge_rxq, iq));
 721  723  }
 722  724  
 723  725  static inline bool
 724  726  is_25G_port(const struct port_info *pi)
 725  727  {
 726  728          return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_25G) != 0);
 727  729  }
 728  730  
 729  731  static inline bool
 730  732  is_40G_port(const struct port_info *pi)
 731  733  {
 732  734          return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_40G) != 0);
 733  735  }
 734  736  
 735  737  static inline bool
 736  738  is_100G_port(const struct port_info *pi)
 737  739  {
 738  740          return ((pi->link_cfg.supported & FW_PORT_CAP_SPEED_100G) != 0);
 739  741  }
 740  742  
 741  743  static inline bool
 742  744  is_10XG_port(const struct port_info *pi)
 743  745  {
 744  746          return (is_10G_port(pi) || is_40G_port(pi) ||
 745  747                  is_25G_port(pi) || is_100G_port(pi));
 746  748  }
 747  749  
 748  750  static inline char *
 749  751  print_port_speed(const struct port_info *pi)
 750  752  {
 751  753          if (!pi)
 752  754                  return "-";
 753  755  
 754  756          if (is_100G_port(pi))
 755  757                  return "100G";
 756  758          else if (is_40G_port(pi))
 757  759                  return "40G";
 758  760          else if (is_25G_port(pi))
 759  761                  return "25G";
 760  762          else if (is_10G_port(pi))
 761  763                  return "10G";
 762  764          else
 763  765                  return "1G";
 764  766  }
 765  767  
 766  768  #ifdef TCP_OFFLOAD_ENABLE
 767  769  int t4_wrq_tx_locked(struct adapter *sc, struct sge_wrq *wrq, mblk_t *m0);
 768  770  
 769  771  static inline int
 770  772  t4_wrq_tx(struct adapter *sc, struct sge_wrq *wrq, mblk_t *m)
 771  773  {
 772  774          int rc;
 773  775  
 774  776          TXQ_LOCK(wrq);
 775  777          rc = t4_wrq_tx_locked(sc, wrq, m);
 776  778          TXQ_UNLOCK(wrq);
 777  779          return (rc);
 778  780  }
 779  781  #endif
 780  782  
 781  783  /**
 782  784   * t4_os_pci_read_seeprom - read four bytes of SEEPROM/VPD contents
 783  785   * @adapter: the adapter
 784  786   * @addr: SEEPROM/VPD Address to read
 785  787   * @valp: where to store the value read
 786  788   *
 787  789   * Read a 32-bit value from the given address in the SEEPROM/VPD.  The address
 788  790   * must be four-byte aligned.  Returns 0 on success, a negative error number
 789  791   * on failure.
 790  792   */
 791  793  static inline int t4_os_pci_read_seeprom(adapter_t *adapter,
 792  794                                           int addr, u32 *valp)
 793  795  {
 794  796          int t4_seeprom_read(struct adapter *adapter, u32 addr, u32 *data);
 795  797          int ret;
 796  798  
 797  799          ret = t4_seeprom_read(adapter, addr, valp);
 798  800  
 799  801          return ret >= 0 ? 0 : ret;
 800  802  }
 801  803  
 802  804  /**
 803  805   * t4_os_pci_write_seeprom - write four bytes of SEEPROM/VPD contents
 804  806   * @adapter: the adapter
 805  807   * @addr: SEEPROM/VPD Address to write
 806  808   * @val: the value to write
 807  809   *
 808  810   * Write a 32-bit value to the given address in the SEEPROM/VPD.  The address
 809  811   * must be four-byte aligned.  Returns 0 on success, a negative error number
 810  812   * on failure.
 811  813   */
 812  814  static inline int t4_os_pci_write_seeprom(adapter_t *adapter,
 813  815                                            int addr, u32 val)
 814  816  {
 815  817          int t4_seeprom_write(struct adapter *adapter, u32 addr, u32 data);
 816  818          int ret;
 817  819  
 818  820          ret = t4_seeprom_write(adapter, addr, val);
 819  821  
 820  822          return ret >= 0 ? 0 : ret;
 821  823  }
 822  824  
 823  825  static inline int t4_os_pci_set_vpd_size(struct adapter *adapter, size_t len)
 824  826  {
 825  827          return 0;
 826  828  }
 827  829  
 828  830  static inline unsigned int t4_use_ldst(struct adapter *adap)
 829  831  {
 830  832          return (adap->flags & FW_OK);
 831  833  }
 832  834  #define t4_os_alloc(_size)      kmem_alloc(_size, KM_SLEEP)
 833  835  
 834  836  static inline void t4_db_full(struct adapter *adap) {}
 835  837  static inline void t4_db_dropped(struct adapter *adap) {}
 836  838  
 837  839  /* t4_nexus.c */
 838  840  int t4_os_find_pci_capability(struct adapter *sc, int cap);
 839  841  void t4_os_portmod_changed(const struct adapter *sc, int idx);
 840  842  int adapter_full_init(struct adapter *sc);
 841  843  int adapter_full_uninit(struct adapter *sc);
 842  844  int port_full_init(struct port_info *pi);
 843  845  int port_full_uninit(struct port_info *pi);
 844  846  void enable_port_queues(struct port_info *pi);
 845  847  void disable_port_queues(struct port_info *pi);
 846  848  int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
 847  849  int t4_register_fw_msg_handler(struct adapter *, int, fw_msg_handler_t);
 848  850  void t4_iterate(void (*func)(int, void *), void *arg);
 849  851  
 850  852  /* t4_sge.c */
 851  853  void t4_sge_init(struct adapter *sc);
 852  854  int t4_setup_adapter_queues(struct adapter *sc);
 853  855  int t4_teardown_adapter_queues(struct adapter *sc);
 854  856  int t4_setup_port_queues(struct port_info *pi);
 855  857  int t4_teardown_port_queues(struct port_info *pi);
 856  858  uint_t t4_intr_all(caddr_t arg1, caddr_t arg2);
 857  859  uint_t t4_intr(caddr_t arg1, caddr_t arg2);
 858  860  uint_t t4_intr_err(caddr_t arg1, caddr_t arg2);
 859  861  int t4_mgmt_tx(struct adapter *sc, mblk_t *m);
 860  862  void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
 861  863  uint32_t position_memwin(struct adapter *, int, uint32_t);
 862  864  
 863  865  mblk_t *t4_eth_tx(void *, mblk_t *);
 864  866  mblk_t *t4_mc_tx(void *arg, mblk_t *m);
 865  867  mblk_t *t4_ring_rx(struct sge_rxq *rxq, int poll_bytes);
 866  868  int t4_alloc_tx_maps(struct adapter *sc, struct tx_maps *txmaps,  int count,
 867  869      int flags);
 868  870  
 869  871  /* t4_mac.c */
 870  872  void t4_mc_init(struct port_info *pi);
 871  873  void t4_mc_cb_init(struct port_info *);
 872  874  void t4_os_link_changed(struct adapter *sc, int idx, int link_stat);
 873  875  void t4_mac_rx(struct port_info *pi, struct sge_rxq *rxq, mblk_t *m);
 874  876  void t4_mac_tx_update(struct port_info *pi, struct sge_txq *txq);
 875  877  int t4_addmac(void *arg, const uint8_t *ucaddr);
 876  878  
 877  879  /* t4_ioctl.c */
 878  880  int t4_ioctl(struct adapter *sc, int cmd, void *data, int mode);
 879  881  
 880  882  struct l2t_data *t4_init_l2t(struct adapter *sc);
 881  883  #endif /* __CXGBE_ADAPTER_H */
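
Two usage notes on the fields this change introduces. The allocb_fail counter
gives the rx path a way to account for get_fl_payload() returning NULL when
an mblk allocation fails (9994) instead of treating the failure as fatal. The
fragment below is illustrative only; the get_fl_payload() signature and the
shape of the surrounding loop are assumptions, not code from this changeset.

        /*
         * Illustrative rx-loop fragment (9994): on allocation failure,
         * record it in the new counter and leave the remaining buffers
         * for a later pass.
         */
        mblk_t *m = get_fl_payload(sc, fl, lq, &fl_bufs_used);
        if (m == NULL) {
                fl->allocb_fail++;
                break;
        }

The per-channel tq[NCHAN] taskqs back 9484: TX descriptor reclamation can be
deferred out of interrupt context so completed descriptors are cleaned in a
timely manner even when the send path goes idle. ddi_taskq_dispatch(9F) is
the standard DDI interface for this; reclaim_tx_task() here is a hypothetical
callback, not a function from the driver.

        /* Hypothetical deferral of TX reclamation to the eq's channel taskq. */
        (void) ddi_taskq_dispatch(sc->tq[eq->tx_chan], reclaim_tx_task,
            eq, DDI_NOSLEEP);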
  