MFV: illumos-gate@bbb9d5d65bf8372aae4b8821c80e218b8b832846
9994 cxgbe t4nex: Handle get_fl_payload() alloc failures
9995 cxgbe t4_devo_attach() should initialize ->sfl
Reviewed by: Toomas Soome <tsoome@me.com>
Reviewed by: Garrett D'Amore <garrett@damore.org>
Approved by: Dan McDonald <danmcd@joyent.com>
Author: John Levon <john.levon@joyent.com>
9484 cxgbe should clean TX descriptors in timely manner
Reviewed by: Patrick Mooney <patrick.mooney@joyent.com>
Reviewed by: Ryan Zezeski <rpz@joyent.com>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
Reviewed by: Toomas Soome <tsoome@me.com>
Approved by: Dan McDonald <danmcd@joyent.com>
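
Notes on the t4_nexus.c changes shown below: 9995 adds TAILQ_INIT(&sc->sfl) to t4_devo_attach() alongside the existing sfl_lock initialization, and 9484 creates one single-threaded "reclaim" taskq per channel (NCHAN) during attach and drains/destroys them during detach, so TX descriptor cleanup runs in a timely manner instead of waiting on later traffic. (The get_fl_payload() handling from 9994 does not appear in this file's diff.) The sfl fix matters because ddi_soft_state_zalloc() only zero-fills the softstate, and a zero-filled TAILQ head is not a valid empty list. A minimal sketch of the invariant, assuming standard <sys/queue.h> semantics; the sfl_head and sge_fl names here are illustrative, not the driver's declarations:

	#include <sys/queue.h>

	struct sge_fl;				/* element type, defined by the driver */
	TAILQ_HEAD(sfl_head, sge_fl);

	static void
	sfl_head_init(struct sfl_head *head)
	{
		/*
		 * A zero-filled head has tqh_first == NULL and tqh_last == NULL.
		 * TAILQ_INSERT_TAIL() executes *(head)->tqh_last = (elm), so the
		 * first insertion into such a head dereferences NULL.  TAILQ_INIT()
		 * establishes the real empty-list state:
		 */
		TAILQ_INIT(head);	/* tqh_first = NULL; tqh_last = &tqh_first */
	}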
--- old/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c
+++ new/usr/src/uts/common/io/cxgbe/t4nex/t4_nexus.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * This file is part of the Chelsio T4 support code.
14 14 *
15 15 * Copyright (C) 2010-2013 Chelsio Communications. All rights reserved.
16 16 *
17 17 * This program is distributed in the hope that it will be useful, but WITHOUT
18 18 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
19 19 * FITNESS FOR A PARTICULAR PURPOSE. See the LICENSE file included in this
20 20 * release for licensing terms and conditions.
21 21 */
22 22
23 23 #include <sys/ddi.h>
24 24 #include <sys/sunddi.h>
25 25 #include <sys/sunndi.h>
26 26 #include <sys/modctl.h>
27 27 #include <sys/conf.h>
28 28 #include <sys/devops.h>
29 29 #include <sys/pci.h>
30 30 #include <sys/atomic.h>
31 31 #include <sys/types.h>
32 32 #include <sys/file.h>
33 33 #include <sys/errno.h>
34 34 #include <sys/open.h>
35 35 #include <sys/cred.h>
36 36 #include <sys/stat.h>
37 37 #include <sys/mkdev.h>
38 38 #include <sys/queue.h>
39 39
40 40 #include "version.h"
41 41 #include "common/common.h"
42 42 #include "common/t4_msg.h"
43 43 #include "common/t4_regs.h"
44 44 #include "firmware/t4_fw.h"
45 45 #include "firmware/t4_cfg.h"
46 46 #include "firmware/t5_fw.h"
47 47 #include "firmware/t5_cfg.h"
48 48 #include "firmware/t6_fw.h"
49 49 #include "firmware/t6_cfg.h"
50 50 #include "t4_l2t.h"
51 51
52 52 static int t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp);
53 53 static int t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp);
54 54 static int t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp,
55 55 int *rp);
56 56 struct cb_ops t4_cb_ops = {
57 57 .cb_open = t4_cb_open,
58 58 .cb_close = t4_cb_close,
59 59 .cb_strategy = nodev,
60 60 .cb_print = nodev,
61 61 .cb_dump = nodev,
62 62 .cb_read = nodev,
63 63 .cb_write = nodev,
64 64 .cb_ioctl = t4_cb_ioctl,
65 65 .cb_devmap = nodev,
66 66 .cb_mmap = nodev,
67 67 .cb_segmap = nodev,
68 68 .cb_chpoll = nochpoll,
69 69 .cb_prop_op = ddi_prop_op,
70 70 .cb_flag = D_MP,
71 71 .cb_rev = CB_REV,
72 72 .cb_aread = nodev,
73 73 .cb_awrite = nodev
74 74 };
75 75
76 76 static int t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op,
77 77 void *arg, void *result);
78 78 static int t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
79 79 void *arg, dev_info_t **cdipp);
80 80 static int t4_bus_unconfig(dev_info_t *dip, uint_t flags,
81 81 ddi_bus_config_op_t op, void *arg);
82 82 struct bus_ops t4_bus_ops = {
83 83 .busops_rev = BUSO_REV,
84 84 .bus_ctl = t4_bus_ctl,
85 85 .bus_prop_op = ddi_bus_prop_op,
86 86 .bus_config = t4_bus_config,
87 87 .bus_unconfig = t4_bus_unconfig,
88 88 };
89 89
90 90 static int t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg,
91 91 void **rp);
92 92 static int t4_devo_probe(dev_info_t *dip);
93 93 static int t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd);
94 94 static int t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd);
95 95 static int t4_devo_quiesce(dev_info_t *dip);
96 96 struct dev_ops t4_dev_ops = {
97 97 .devo_rev = DEVO_REV,
98 98 .devo_getinfo = t4_devo_getinfo,
99 99 .devo_identify = nulldev,
100 100 .devo_probe = t4_devo_probe,
101 101 .devo_attach = t4_devo_attach,
102 102 .devo_detach = t4_devo_detach,
103 103 .devo_reset = nodev,
104 104 .devo_cb_ops = &t4_cb_ops,
105 105 .devo_bus_ops = &t4_bus_ops,
106 106 .devo_quiesce = &t4_devo_quiesce,
107 107 };
108 108
109 109 static struct modldrv modldrv = {
110 110 .drv_modops = &mod_driverops,
111 111 .drv_linkinfo = "Chelsio T4 nexus " DRV_VERSION,
112 112 .drv_dev_ops = &t4_dev_ops
113 113 };
114 114
115 115 static struct modlinkage modlinkage = {
116 116 .ml_rev = MODREV_1,
117 117 .ml_linkage = {&modldrv, NULL},
118 118 };
119 119
120 120 void *t4_list;
121 121
122 122 struct intrs_and_queues {
123 123 int intr_type; /* DDI_INTR_TYPE_* */
124 124 int nirq; /* Number of vectors */
125 125 int intr_fwd; /* Interrupts forwarded */
126 126 int ntxq10g; /* # of NIC txq's for each 10G port */
127 127 int nrxq10g; /* # of NIC rxq's for each 10G port */
128 128 int ntxq1g; /* # of NIC txq's for each 1G port */
129 129 int nrxq1g; /* # of NIC rxq's for each 1G port */
130 130 #ifdef TCP_OFFLOAD_ENABLE
131 131 int nofldtxq10g; /* # of TOE txq's for each 10G port */
132 132 int nofldrxq10g; /* # of TOE rxq's for each 10G port */
133 133 int nofldtxq1g; /* # of TOE txq's for each 1G port */
134 134 int nofldrxq1g; /* # of TOE rxq's for each 1G port */
135 135 #endif
136 136 };
137 137
138 138 struct fw_info fi[3];
139 139
140 140 static int cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss,
141 141 mblk_t *m);
142 142 static int fw_msg_not_handled(struct adapter *, const __be64 *);
143 143 int t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h);
144 144 static unsigned int getpf(struct adapter *sc);
145 145 static int prep_firmware(struct adapter *sc);
146 146 static int upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma);
147 147 static int partition_resources(struct adapter *sc);
148 148 static int adap__pre_init_tweaks(struct adapter *sc);
149 149 static int get_params__pre_init(struct adapter *sc);
150 150 static int get_params__post_init(struct adapter *sc);
151 151 static int set_params__post_init(struct adapter *);
152 152 static void setup_memwin(struct adapter *sc);
153 153 static int validate_mt_off_len(struct adapter *, int, uint32_t, int,
154 154 uint32_t *);
155 155 void memwin_info(struct adapter *, int, uint32_t *, uint32_t *);
156 156 uint32_t position_memwin(struct adapter *, int, uint32_t);
157 157 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
158 158 uint_t count);
159 159 static int prop_lookup_int_array(struct adapter *sc, char *name, int *data,
160 160 uint_t count);
161 161 static int init_driver_props(struct adapter *sc, struct driver_properties *p);
162 162 static int remove_extra_props(struct adapter *sc, int n10g, int n1g);
163 163 static int cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
164 164 struct intrs_and_queues *iaq);
165 165 static int add_child_node(struct adapter *sc, int idx);
166 166 static int remove_child_node(struct adapter *sc, int idx);
167 167 static kstat_t *setup_kstats(struct adapter *sc);
168 168 static kstat_t *setup_wc_kstats(struct adapter *);
169 169 static int update_wc_kstats(kstat_t *, int);
170 170 #ifdef TCP_OFFLOAD_ENABLE
171 171 static int toe_capability(struct port_info *pi, int enable);
172 172 static int activate_uld(struct adapter *sc, int id, struct uld_softc *usc);
173 173 static int deactivate_uld(struct uld_softc *usc);
174 174 #endif
175 175 static kmutex_t t4_adapter_list_lock;
176 176 static SLIST_HEAD(, adapter) t4_adapter_list;
177 177 #ifdef TCP_OFFLOAD_ENABLE
178 178 static kmutex_t t4_uld_list_lock;
179 179 static SLIST_HEAD(, uld_info) t4_uld_list;
180 180 #endif
181 181
182 182 int
183 183 _init(void)
184 184 {
185 185 int rc;
186 186
187 187 rc = ddi_soft_state_init(&t4_list, sizeof (struct adapter), 0);
188 188 if (rc != 0)
189 189 return (rc);
190 190
191 191 rc = mod_install(&modlinkage);
192 192 if (rc != 0)
193 193 ddi_soft_state_fini(&t4_list);
194 194
195 195 mutex_init(&t4_adapter_list_lock, NULL, MUTEX_DRIVER, NULL);
196 196 SLIST_INIT(&t4_adapter_list);
197 197
198 198 #ifdef TCP_OFFLOAD_ENABLE
199 199 mutex_init(&t4_uld_list_lock, NULL, MUTEX_DRIVER, NULL);
200 200 SLIST_INIT(&t4_uld_list);
201 201 #endif
202 202
203 203 return (rc);
204 204 }
205 205
206 206 int
207 207 _fini(void)
208 208 {
209 209 int rc;
210 210
211 211 rc = mod_remove(&modlinkage);
212 212 if (rc != 0)
213 213 return (rc);
214 214
215 215 ddi_soft_state_fini(&t4_list);
216 216 return (0);
217 217 }
218 218
219 219 int
220 220 _info(struct modinfo *mi)
221 221 {
222 222 return (mod_info(&modlinkage, mi));
223 223 }
224 224
225 225 /* ARGSUSED */
226 226 static int
227 227 t4_devo_getinfo(dev_info_t *dip, ddi_info_cmd_t cmd, void *arg, void **rp)
228 228 {
229 229 struct adapter *sc;
230 230 minor_t minor;
231 231
232 232 minor = getminor((dev_t)arg); /* same as instance# in our case */
233 233
234 234 if (cmd == DDI_INFO_DEVT2DEVINFO) {
235 235 sc = ddi_get_soft_state(t4_list, minor);
236 236 if (sc == NULL)
237 237 return (DDI_FAILURE);
238 238
239 239 ASSERT(sc->dev == (dev_t)arg);
240 240 *rp = (void *)sc->dip;
241 241 } else if (cmd == DDI_INFO_DEVT2INSTANCE)
242 242 *rp = (void *) (unsigned long) minor;
243 243 else
244 244 ASSERT(0);
245 245
246 246 return (DDI_SUCCESS);
247 247 }
248 248
249 249 static int
250 250 t4_devo_probe(dev_info_t *dip)
251 251 {
252 252 int rc, id, *reg;
253 253 uint_t n, pf;
254 254
255 255 id = ddi_prop_get_int(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
256 256 "device-id", 0xffff);
257 257 if (id == 0xffff)
258 258 return (DDI_PROBE_DONTCARE);
259 259
260 260 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
261 261 "reg", ®, &n);
262 262 if (rc != DDI_SUCCESS)
263 263 return (DDI_PROBE_DONTCARE);
264 264
265 265 pf = PCI_REG_FUNC_G(reg[0]);
266 266 ddi_prop_free(reg);
267 267
268 268 /* Prevent driver attachment on any PF except 0 on the FPGA */
269 269 if (id == 0xa000 && pf != 0)
270 270 return (DDI_PROBE_FAILURE);
271 271
272 272 return (DDI_PROBE_DONTCARE);
273 273 }
274 274
275 275 static int
276 276 t4_devo_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
277 277 {
278 278 struct adapter *sc = NULL;
279 279 struct sge *s;
280 280 int i, instance, rc = DDI_SUCCESS, rqidx, tqidx, q;
281 281 int irq = 0, nxg, n100g, n40g, n25g, n10g, n1g;
282 282 #ifdef TCP_OFFLOAD_ENABLE
283 283 int ofld_rqidx, ofld_tqidx;
284 284 #endif
285 285 char name[16];
286 286 struct driver_properties *prp;
287 287 struct intrs_and_queues iaq;
288 288 ddi_device_acc_attr_t da = {
289 289 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
290 290 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
291 291 .devacc_attr_dataorder = DDI_UNORDERED_OK_ACC
292 292 };
293 293 ddi_device_acc_attr_t da1 = {
294 294 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
295 295 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
296 296 .devacc_attr_dataorder = DDI_MERGING_OK_ACC
297 297 };
298 298
299 299 if (cmd != DDI_ATTACH)
300 300 return (DDI_FAILURE);
301 301
302 302 /*
303 303 * Allocate space for soft state.
304 304 */
305 305 instance = ddi_get_instance(dip);
306 306 rc = ddi_soft_state_zalloc(t4_list, instance);
307 307 if (rc != DDI_SUCCESS) {
308 308 cxgb_printf(dip, CE_WARN,
309 309 "failed to allocate soft state: %d", rc);
310 310 return (DDI_FAILURE);
311 311 }
312 312
313 313 sc = ddi_get_soft_state(t4_list, instance);
314 314 sc->dip = dip;
315 315 sc->dev = makedevice(ddi_driver_major(dip), instance);
316 316 mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
317 317 cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
318 318 mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
319 + TAILQ_INIT(&sc->sfl);
319 320
320 321 mutex_enter(&t4_adapter_list_lock);
321 322 SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
322 323 mutex_exit(&t4_adapter_list_lock);
323 324
324 325 sc->pf = getpf(sc);
325 326 if (sc->pf > 8) {
326 327 rc = EINVAL;
327 328 cxgb_printf(dip, CE_WARN,
328 329 "failed to determine PCI PF# of device");
329 330 goto done;
330 331 }
331 332 sc->mbox = sc->pf;
332 333
333 334 /* Initialize the driver properties */
334 335 prp = &sc->props;
335 336 (void)init_driver_props(sc, prp);
336 337
337 338 /*
338 339 * Enable access to the PCI config space.
339 340 */
340 341 rc = pci_config_setup(dip, &sc->pci_regh);
341 342 if (rc != DDI_SUCCESS) {
342 343 cxgb_printf(dip, CE_WARN,
343 344 "failed to enable PCI config space access: %d", rc);
344 345 goto done;
345 346 }
346 347
347 348 /* TODO: Set max read request to 4K */
348 349
349 350 /*
350 351 * Enable MMIO access.
351 352 */
352 353 rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
353 354 if (rc != DDI_SUCCESS) {
354 355 cxgb_printf(dip, CE_WARN,
355 356 "failed to map device registers: %d", rc);
356 357 goto done;
357 358 }
358 359
359 360 (void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));
360 361
361 362 /*
362 363 * Initialize cpl handler.
363 364 */
364 365 for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
365 366 sc->cpl_handler[i] = cpl_not_handled;
366 367 }
367 368
368 369 for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
369 370 sc->fw_msg_handler[i] = fw_msg_not_handled;
370 371 }
371 372
373 + for (i = 0; i < NCHAN; i++) {
374 + (void) snprintf(name, sizeof (name), "%s-%d",
375 + "reclaim", i);
376 + sc->tq[i] = ddi_taskq_create(sc->dip,
377 + name, 1, TASKQ_DEFAULTPRI, 0);
378 +
379 + if (sc->tq[i] == NULL) {
380 + cxgb_printf(dip, CE_WARN,
381 + "failed to create task queues");
382 + rc = DDI_FAILURE;
383 + goto done;
384 + }
385 + }
386 +
372 387 /*
373 388 * Prepare the adapter for operation.
374 389 */
375 390 rc = -t4_prep_adapter(sc, false);
376 391 if (rc != 0) {
377 392 cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
378 393 goto done;
379 394 }
380 395
381 396 /*
382 397 * Enable BAR1 access.
383 398 */
384 399 sc->doorbells |= DOORBELL_KDB;
385 400 rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
386 401 if (rc != DDI_SUCCESS) {
387 402 cxgb_printf(dip, CE_WARN,
388 403 "failed to map BAR1 device registers: %d", rc);
389 404 goto done;
390 405 } else {
391 406 if (is_t5(sc->params.chip)) {
392 407 sc->doorbells |= DOORBELL_UDB;
393 408 if (prp->wc) {
394 409 /*
395 410 * Enable write combining on BAR2. This is the
396 411 * userspace doorbell BAR and is split into 128B
397 412 * (UDBS_SEG_SIZE) doorbell regions, each associated
398 413 * with an egress queue. The first 64B has the doorbell
399 414 * and the second 64B can be used to submit a tx work
400 415 * request with an implicit doorbell.
401 416 */
402 417 sc->doorbells &= ~DOORBELL_UDB;
403 418 sc->doorbells |= (DOORBELL_WCWR |
404 419 DOORBELL_UDBWC);
405 420 t4_write_reg(sc, A_SGE_STAT_CFG,
406 421 V_STATSOURCE_T5(7) | V_STATMODE(0));
407 422 }
408 423 }
409 424 }
410 425
411 426 /*
412 427 * Do this really early. Note that minor number = instance.
413 428 */
414 429 (void) snprintf(name, sizeof (name), "%s,%d", T4_NEXUS_NAME, instance);
415 430 rc = ddi_create_minor_node(dip, name, S_IFCHR, instance,
416 431 DDI_NT_NEXUS, 0);
417 432 if (rc != DDI_SUCCESS) {
418 433 cxgb_printf(dip, CE_WARN,
419 434 "failed to create device node: %d", rc);
420 435 rc = DDI_SUCCESS; /* carry on */
421 436 }
422 437
423 438 /* Do this early. Memory window is required for loading config file. */
424 439 setup_memwin(sc);
425 440
426 441 /* Prepare the firmware for operation */
427 442 rc = prep_firmware(sc);
428 443 if (rc != 0)
429 444 goto done; /* error message displayed already */
430 445
431 446 rc = adap__pre_init_tweaks(sc);
432 447 if (rc != 0)
433 448 goto done;
434 449
435 450 rc = get_params__pre_init(sc);
436 451 if (rc != 0)
437 452 goto done; /* error message displayed already */
438 453
439 454 t4_sge_init(sc);
440 455
441 456 if (sc->flags & MASTER_PF) {
442 457 /* get basic stuff going */
443 458 rc = -t4_fw_initialize(sc, sc->mbox);
444 459 if (rc != 0) {
445 460 cxgb_printf(sc->dip, CE_WARN,
446 461 "early init failed: %d.\n", rc);
447 462 goto done;
448 463 }
449 464 }
450 465
451 466 rc = get_params__post_init(sc);
452 467 if (rc != 0)
453 468 goto done; /* error message displayed already */
454 469
455 470 rc = set_params__post_init(sc);
456 471 if (rc != 0)
457 472 goto done; /* error message displayed already */
458 473
459 474 /*
460 475 * TODO: This is the place to call t4_set_filter_mode()
461 476 */
462 477
463 478 /* tweak some settings */
464 479 t4_write_reg(sc, A_TP_SHIFT_CNT, V_SYNSHIFTMAX(6) | V_RXTSHIFTMAXR1(4) |
465 480 V_RXTSHIFTMAXR2(15) | V_PERSHIFTBACKOFFMAX(8) | V_PERSHIFTMAX(8) |
466 481 V_KEEPALIVEMAXR1(4) | V_KEEPALIVEMAXR2(9));
467 482 t4_write_reg(sc, A_ULP_RX_TDDP_PSZ, V_HPZ0(PAGE_SHIFT - 12));
468 483
469 484 /*
470 485 * Work-around for bug 2619
471 486 * Set DisableVlan field in TP_RSS_CONFIG_VRT register so that the
472 487 * VLAN tag extraction is disabled.
473 488 */
474 489 t4_set_reg_field(sc, A_TP_RSS_CONFIG_VRT, F_DISABLEVLAN, F_DISABLEVLAN);
475 490
476 491 /* Store filter mode */
477 492 t4_read_indirect(sc, A_TP_PIO_ADDR, A_TP_PIO_DATA, &sc->filter_mode, 1,
478 493 A_TP_VLAN_PRI_MAP);
479 494
480 495 /*
481 496 * First pass over all the ports - allocate VIs and initialize some
482 497 * basic parameters like mac address, port type, etc. We also figure
483 498 * out whether a port is 10G or 1G and use that information when
484 499 * calculating how many interrupts to attempt to allocate.
485 500 */
486 501 n100g = n40g = n25g = n10g = n1g = 0;
487 502 for_each_port(sc, i) {
488 503 struct port_info *pi;
489 504
490 505 pi = kmem_zalloc(sizeof (*pi), KM_SLEEP);
491 506 sc->port[i] = pi;
492 507
493 508 /* These must be set before t4_port_init */
494 509 pi->adapter = sc;
495 510 /* LINTED: E_ASSIGN_NARROW_CONV */
496 511 pi->port_id = i;
497 512 }
498 513
499 514 /* Allocate the vi and initialize parameters like mac addr */
500 515 rc = -t4_port_init(sc, sc->mbox, sc->pf, 0);
501 516 if (rc) {
502 517 cxgb_printf(dip, CE_WARN,
503 518 "unable to initialize port: %d", rc);
504 519 goto done;
505 520 }
506 521
507 522 for_each_port(sc, i) {
508 523 struct port_info *pi = sc->port[i];
509 524
510 525 mutex_init(&pi->lock, NULL, MUTEX_DRIVER, NULL);
511 526 pi->mtu = ETHERMTU;
512 527
513 528 if (is_100G_port(pi)) {
514 529 n100g++;
515 530 pi->tmr_idx = prp->tmr_idx_10g;
516 531 pi->pktc_idx = prp->pktc_idx_10g;
517 532 } else if (is_40G_port(pi)) {
518 533 n40g++;
519 534 pi->tmr_idx = prp->tmr_idx_10g;
520 535 pi->pktc_idx = prp->pktc_idx_10g;
521 536 } else if (is_25G_port(pi)) {
522 537 n25g++;
523 538 pi->tmr_idx = prp->tmr_idx_10g;
524 539 pi->pktc_idx = prp->pktc_idx_10g;
525 540 } else if (is_10G_port(pi)) {
526 541 n10g++;
527 542 pi->tmr_idx = prp->tmr_idx_10g;
528 543 pi->pktc_idx = prp->pktc_idx_10g;
529 544 } else {
530 545 n1g++;
531 546 pi->tmr_idx = prp->tmr_idx_1g;
532 547 pi->pktc_idx = prp->pktc_idx_1g;
533 548 }
534 549
535 550 pi->xact_addr_filt = -1;
536 551 t4_mc_init(pi);
537 552
538 553 setbit(&sc->registered_device_map, i);
539 554 }
540 555
541 556 nxg = n10g + n25g + n40g + n100g;
542 557 (void) remove_extra_props(sc, nxg, n1g);
543 558
544 559 if (sc->registered_device_map == 0) {
545 560 cxgb_printf(dip, CE_WARN, "no usable ports");
546 561 rc = DDI_FAILURE;
547 562 goto done;
548 563 }
549 564
550 565 rc = cfg_itype_and_nqueues(sc, nxg, n1g, &iaq);
551 566 if (rc != 0)
552 567 goto done; /* error message displayed already */
553 568
554 569 sc->intr_type = iaq.intr_type;
555 570 sc->intr_count = iaq.nirq;
556 571
557 572 if (sc->props.multi_rings && (sc->intr_type != DDI_INTR_TYPE_MSIX)) {
558 573 sc->props.multi_rings = 0;
559 574 cxgb_printf(dip, CE_WARN,
560 575 "Multiple rings disabled as interrupt type is not MSI-X");
561 576 }
562 577
563 578 if (sc->props.multi_rings && iaq.intr_fwd) {
564 579 sc->props.multi_rings = 0;
565 580 cxgb_printf(dip, CE_WARN,
566 581 "Multiple rings disabled as interrupts are forwarded");
567 582 }
568 583
569 584 if (!sc->props.multi_rings) {
570 585 iaq.ntxq10g = 1;
571 586 iaq.ntxq1g = 1;
572 587 }
573 588 s = &sc->sge;
574 589 s->nrxq = nxg * iaq.nrxq10g + n1g * iaq.nrxq1g;
575 590 s->ntxq = nxg * iaq.ntxq10g + n1g * iaq.ntxq1g;
576 591 s->neq = s->ntxq + s->nrxq; /* the fl in an rxq is an eq */
577 592 #ifdef TCP_OFFLOAD_ENABLE
578 593 /* control queues, 1 per port + 1 mgmtq */
579 594 s->neq += sc->params.nports + 1;
580 595 #endif
581 596 s->niq = s->nrxq + 1; /* 1 extra for firmware event queue */
582 597 if (iaq.intr_fwd != 0)
583 598 sc->flags |= INTR_FWD;
584 599 #ifdef TCP_OFFLOAD_ENABLE
585 600 if (is_offload(sc) != 0) {
586 601
587 602 s->nofldrxq = nxg * iaq.nofldrxq10g + n1g * iaq.nofldrxq1g;
588 603 s->nofldtxq = nxg * iaq.nofldtxq10g + n1g * iaq.nofldtxq1g;
589 604 s->neq += s->nofldtxq + s->nofldrxq;
590 605 s->niq += s->nofldrxq;
591 606
592 607 s->ofld_rxq = kmem_zalloc(s->nofldrxq *
593 608 sizeof (struct sge_ofld_rxq), KM_SLEEP);
594 609 s->ofld_txq = kmem_zalloc(s->nofldtxq *
595 610 sizeof (struct sge_wrq), KM_SLEEP);
596 611 s->ctrlq = kmem_zalloc(sc->params.nports *
597 612 sizeof (struct sge_wrq), KM_SLEEP);
598 613
599 614 }
600 615 #endif
601 616 s->rxq = kmem_zalloc(s->nrxq * sizeof (struct sge_rxq), KM_SLEEP);
602 617 s->txq = kmem_zalloc(s->ntxq * sizeof (struct sge_txq), KM_SLEEP);
603 618 s->iqmap = kmem_zalloc(s->niq * sizeof (struct sge_iq *), KM_SLEEP);
604 619 s->eqmap = kmem_zalloc(s->neq * sizeof (struct sge_eq *), KM_SLEEP);
605 620
606 621 sc->intr_handle = kmem_zalloc(sc->intr_count *
607 622 sizeof (ddi_intr_handle_t), KM_SLEEP);
608 623
609 624 /*
610 625 * Second pass over the ports. This time we know the number of rx and
611 626 * tx queues that each port should get.
612 627 */
613 628 rqidx = tqidx = 0;
614 629 #ifdef TCP_OFFLOAD_ENABLE
615 630 ofld_rqidx = ofld_tqidx = 0;
616 631 #endif
617 632 for_each_port(sc, i) {
618 633 struct port_info *pi = sc->port[i];
619 634
620 635 if (pi == NULL)
621 636 continue;
622 637
623 638 t4_mc_cb_init(pi);
624 639 /* LINTED: E_ASSIGN_NARROW_CONV */
625 640 pi->first_rxq = rqidx;
626 641 /* LINTED: E_ASSIGN_NARROW_CONV */
627 642 pi->nrxq = (is_10XG_port(pi)) ? iaq.nrxq10g
628 643 : iaq.nrxq1g;
629 644 /* LINTED: E_ASSIGN_NARROW_CONV */
630 645 pi->first_txq = tqidx;
631 646 /* LINTED: E_ASSIGN_NARROW_CONV */
632 647 pi->ntxq = (is_10XG_port(pi)) ? iaq.ntxq10g
633 648 : iaq.ntxq1g;
634 649
635 650 rqidx += pi->nrxq;
636 651 tqidx += pi->ntxq;
637 652
638 653 #ifdef TCP_OFFLOAD_ENABLE
639 654 if (is_offload(sc) != 0) {
640 655 /* LINTED: E_ASSIGN_NARROW_CONV */
641 656 pi->first_ofld_rxq = ofld_rqidx;
642 657 pi->nofldrxq = max(1, pi->nrxq / 4);
643 658
644 659 /* LINTED: E_ASSIGN_NARROW_CONV */
645 660 pi->first_ofld_txq = ofld_tqidx;
646 661 pi->nofldtxq = max(1, pi->ntxq / 2);
647 662
648 663 ofld_rqidx += pi->nofldrxq;
649 664 ofld_tqidx += pi->nofldtxq;
650 665 }
651 666 #endif
652 667
653 668 /*
654 669 * Enable hw checksumming and LSO for all ports by default.
655 670 * They can be disabled using ndd (hw_csum and hw_lso).
656 671 */
657 672 pi->features |= (CXGBE_HW_CSUM | CXGBE_HW_LSO);
658 673 }
659 674
660 675 #ifdef TCP_OFFLOAD_ENABLE
661 676 sc->l2t = t4_init_l2t(sc);
662 677 #endif
663 678
664 679 /*
665 680 * Setup Interrupts.
666 681 */
667 682
668 683 i = 0;
669 684 rc = ddi_intr_alloc(dip, sc->intr_handle, sc->intr_type, 0,
670 685 sc->intr_count, &i, DDI_INTR_ALLOC_STRICT);
671 686 if (rc != DDI_SUCCESS) {
672 687 cxgb_printf(dip, CE_WARN,
673 688 "failed to allocate %d interrupt(s) of type %d: %d, %d",
674 689 sc->intr_count, sc->intr_type, rc, i);
675 690 goto done;
676 691 }
677 692 ASSERT(sc->intr_count == i); /* allocation was STRICT */
678 693 (void) ddi_intr_get_cap(sc->intr_handle[0], &sc->intr_cap);
679 694 (void) ddi_intr_get_pri(sc->intr_handle[0], &sc->intr_pri);
680 695 if (sc->intr_count == 1) {
681 696 ASSERT(sc->flags & INTR_FWD);
682 697 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_all, sc,
683 698 &s->fwq);
684 699 } else {
685 700 /* Multiple interrupts. The first one is always error intr */
686 701 (void) ddi_intr_add_handler(sc->intr_handle[0], t4_intr_err, sc,
687 702 NULL);
688 703 irq++;
689 704
690 705 /* The second one is always the firmware event queue */
691 706 (void) ddi_intr_add_handler(sc->intr_handle[1], t4_intr, sc,
692 707 &s->fwq);
693 708 irq++;
694 709 /*
695 710 * Note that if INTR_FWD is set then either the NIC rx
696 711 * queues or (exclusive or) the TOE rx queues will be taking
697 712 * direct interrupts.
698 713 *
699 714 * There is no need to check for is_offload(sc) as nofldrxq
700 715 * will be 0 if offload is disabled.
701 716 */
702 717 for_each_port(sc, i) {
703 718 struct port_info *pi = sc->port[i];
704 719 struct sge_rxq *rxq;
705 720 #ifdef TCP_OFFLOAD_ENABLE
706 721 struct sge_ofld_rxq *ofld_rxq;
707 722
708 723 /*
709 724 * Skip over the NIC queues if they aren't taking direct
710 725 * interrupts.
711 726 */
712 727 if ((sc->flags & INTR_FWD) &&
713 728 pi->nofldrxq > pi->nrxq)
714 729 goto ofld_queues;
715 730 #endif
716 731 rxq = &s->rxq[pi->first_rxq];
717 732 for (q = 0; q < pi->nrxq; q++, rxq++) {
718 733 (void) ddi_intr_add_handler(
719 734 sc->intr_handle[irq], t4_intr, sc,
720 735 &rxq->iq);
721 736 irq++;
722 737 }
723 738
724 739 #ifdef TCP_OFFLOAD_ENABLE
725 740 /*
726 741 * Skip over the offload queues if they aren't taking
727 742 * direct interrupts.
728 743 */
729 744 if ((sc->flags & INTR_FWD))
730 745 continue;
731 746 ofld_queues:
732 747 ofld_rxq = &s->ofld_rxq[pi->first_ofld_rxq];
733 748 for (q = 0; q < pi->nofldrxq; q++, ofld_rxq++) {
734 749 (void) ddi_intr_add_handler(
735 750 sc->intr_handle[irq], t4_intr, sc,
736 751 &ofld_rxq->iq);
737 752 irq++;
738 753 }
739 754 #endif
740 755 }
741 756
742 757 }
743 758 sc->flags |= INTR_ALLOCATED;
744 759
745 760 ASSERT(rc == DDI_SUCCESS);
746 761 ddi_report_dev(dip);
747 762
748 763 /*
749 764 * Hardware/Firmware/etc. Version/Revision IDs.
750 765 */
751 766 t4_dump_version_info(sc);
752 767
753 768 if (n100g) {
754 769 cxgb_printf(dip, CE_NOTE,
755 770 "%dx100G (%d rxq, %d txq total) %d %s.",
756 771 n100g, rqidx, tqidx, sc->intr_count,
757 772 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
758 773 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
759 774 "fixed interrupt");
760 775 } else if (n40g) {
761 776 cxgb_printf(dip, CE_NOTE,
762 777 "%dx40G (%d rxq, %d txq total) %d %s.",
763 778 n40g, rqidx, tqidx, sc->intr_count,
764 779 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
765 780 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
766 781 "fixed interrupt");
767 782 } else if (n25g) {
768 783 cxgb_printf(dip, CE_NOTE,
769 784 "%dx25G (%d rxq, %d txq total) %d %s.",
770 785 n25g, rqidx, tqidx, sc->intr_count,
771 786 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
772 787 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
773 788 "fixed interrupt");
774 789 } else if (n10g && n1g) {
775 790 cxgb_printf(dip, CE_NOTE,
776 791 "%dx10G %dx1G (%d rxq, %d txq total) %d %s.",
777 792 n10g, n1g, rqidx, tqidx, sc->intr_count,
778 793 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
779 794 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
780 795 "fixed interrupt");
781 796 } else {
782 797 cxgb_printf(dip, CE_NOTE,
783 798 "%dx%sG (%d rxq, %d txq per port) %d %s.",
784 799 n10g ? n10g : n1g,
785 800 n10g ? "10" : "1",
786 801 n10g ? iaq.nrxq10g : iaq.nrxq1g,
787 802 n10g ? iaq.ntxq10g : iaq.ntxq1g,
788 803 sc->intr_count,
789 804 sc->intr_type == DDI_INTR_TYPE_MSIX ? "MSI-X interrupts" :
790 805 sc->intr_type == DDI_INTR_TYPE_MSI ? "MSI interrupts" :
791 806 "fixed interrupt");
792 807 }
793 808
794 809 sc->ksp = setup_kstats(sc);
795 810 sc->ksp_stat = setup_wc_kstats(sc);
796 811 sc->params.drv_memwin = MEMWIN_NIC;
797 812
798 813 done:
799 814 if (rc != DDI_SUCCESS) {
800 815 (void) t4_devo_detach(dip, DDI_DETACH);
801 816
802 817 /* rc may have errno style errors or DDI errors */
803 818 rc = DDI_FAILURE;
804 819 }
805 820
806 821 return (rc);
807 822 }
808 823
809 824 static int
810 825 t4_devo_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
811 826 {
812 827 int instance, i;
813 828 struct adapter *sc;
814 829 struct port_info *pi;
815 830 struct sge *s;
816 831
817 832 if (cmd != DDI_DETACH)
818 833 return (DDI_FAILURE);
819 834
820 835 instance = ddi_get_instance(dip);
821 836 sc = ddi_get_soft_state(t4_list, instance);
822 837 if (sc == NULL)
823 838 return (DDI_SUCCESS);
824 839
825 840 if (sc->flags & FULL_INIT_DONE) {
826 841 t4_intr_disable(sc);
827 842 for_each_port(sc, i) {
828 843 pi = sc->port[i];
829 844 if (pi && pi->flags & PORT_INIT_DONE)
830 845 (void) port_full_uninit(pi);
831 846 }
832 847 (void) adapter_full_uninit(sc);
833 848 }
834 849
835 850 /* Safe to call no matter what */
836 851 ddi_prop_remove_all(dip);
837 852 ddi_remove_minor_node(dip, NULL);
838 853
854 + for (i = 0; i < NCHAN; i++) {
855 + if (sc->tq[i]) {
856 + ddi_taskq_wait(sc->tq[i]);
857 + ddi_taskq_destroy(sc->tq[i]);
858 + }
859 + }
860 +
839 861 if (sc->ksp != NULL)
840 862 kstat_delete(sc->ksp);
841 863 if (sc->ksp_stat != NULL)
842 864 kstat_delete(sc->ksp_stat);
843 865
844 866 s = &sc->sge;
845 867 if (s->rxq != NULL)
846 868 kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
847 869 #ifdef TCP_OFFLOAD_ENABLE
848 870 if (s->ofld_txq != NULL)
849 871 kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
850 872 if (s->ofld_rxq != NULL)
851 873 kmem_free(s->ofld_rxq,
852 874 s->nofldrxq * sizeof (struct sge_ofld_rxq));
853 875 if (s->ctrlq != NULL)
854 876 kmem_free(s->ctrlq,
855 877 sc->params.nports * sizeof (struct sge_wrq));
856 878 #endif
857 879 if (s->txq != NULL)
858 880 kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
859 881 if (s->iqmap != NULL)
860 882 kmem_free(s->iqmap, s->niq * sizeof (struct sge_iq *));
861 883 if (s->eqmap != NULL)
862 884 kmem_free(s->eqmap, s->neq * sizeof (struct sge_eq *));
863 885
864 886 if (s->rxbuf_cache != NULL)
865 887 rxbuf_cache_destroy(s->rxbuf_cache);
866 888
867 889 if (sc->flags & INTR_ALLOCATED) {
868 890 for (i = 0; i < sc->intr_count; i++) {
869 891 (void) ddi_intr_remove_handler(sc->intr_handle[i]);
870 892 (void) ddi_intr_free(sc->intr_handle[i]);
871 893 }
872 894 sc->flags &= ~INTR_ALLOCATED;
873 895 }
874 896
875 897 if (sc->intr_handle != NULL) {
876 898 kmem_free(sc->intr_handle,
877 899 sc->intr_count * sizeof (*sc->intr_handle));
878 900 }
879 901
880 902 for_each_port(sc, i) {
881 903 pi = sc->port[i];
882 904 if (pi != NULL) {
883 905 mutex_destroy(&pi->lock);
884 906 kmem_free(pi, sizeof (*pi));
885 907 clrbit(&sc->registered_device_map, i);
886 908 }
887 909 }
888 910
889 911 if (sc->flags & FW_OK)
890 912 (void) t4_fw_bye(sc, sc->mbox);
891 913
892 914 if (sc->reg1h != NULL)
893 915 ddi_regs_map_free(&sc->reg1h);
894 916
895 917 if (sc->regh != NULL)
896 918 ddi_regs_map_free(&sc->regh);
897 919
898 920 if (sc->pci_regh != NULL)
899 921 pci_config_teardown(&sc->pci_regh);
900 922
901 923 mutex_enter(&t4_adapter_list_lock);
902 924 SLIST_REMOVE_HEAD(&t4_adapter_list, link);
903 925 mutex_exit(&t4_adapter_list_lock);
904 926
905 927 mutex_destroy(&sc->lock);
906 928 cv_destroy(&sc->cv);
907 929 mutex_destroy(&sc->sfl_lock);
908 930
909 931 #ifdef DEBUG
910 932 bzero(sc, sizeof (*sc));
911 933 #endif
912 934 ddi_soft_state_free(t4_list, instance);
913 935
914 936 return (DDI_SUCCESS);
915 937 }
916 938
917 939 static int
918 940 t4_devo_quiesce(dev_info_t *dip)
919 941 {
920 942 int instance;
921 943 struct adapter *sc;
922 944
923 945 instance = ddi_get_instance(dip);
924 946 sc = ddi_get_soft_state(t4_list, instance);
925 947 if (sc == NULL)
926 948 return (DDI_SUCCESS);
927 949
928 950 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
929 951 t4_intr_disable(sc);
930 952 t4_write_reg(sc, A_PL_RST, F_PIORSTMODE | F_PIORST);
931 953
932 954 return (DDI_SUCCESS);
933 955 }
934 956
935 957 static int
936 958 t4_bus_ctl(dev_info_t *dip, dev_info_t *rdip, ddi_ctl_enum_t op, void *arg,
937 959 void *result)
938 960 {
939 961 char s[4];
940 962 struct port_info *pi;
941 963 dev_info_t *child = (dev_info_t *)arg;
942 964
943 965 switch (op) {
944 966 case DDI_CTLOPS_REPORTDEV:
945 967 pi = ddi_get_parent_data(rdip);
946 968 pi->instance = ddi_get_instance(dip);
947 969 pi->child_inst = ddi_get_instance(rdip);
948 970 cmn_err(CE_CONT, "?%s%d is port %s on %s%d\n",
949 971 ddi_node_name(rdip), ddi_get_instance(rdip),
950 972 ddi_get_name_addr(rdip), ddi_driver_name(dip),
951 973 ddi_get_instance(dip));
952 974 return (DDI_SUCCESS);
953 975
954 976 case DDI_CTLOPS_INITCHILD:
955 977 pi = ddi_get_parent_data(child);
956 978 if (pi == NULL)
957 979 return (DDI_NOT_WELL_FORMED);
958 980 (void) snprintf(s, sizeof (s), "%d", pi->port_id);
959 981 ddi_set_name_addr(child, s);
960 982 return (DDI_SUCCESS);
961 983
962 984 case DDI_CTLOPS_UNINITCHILD:
963 985 ddi_set_name_addr(child, NULL);
964 986 return (DDI_SUCCESS);
965 987
966 988 case DDI_CTLOPS_ATTACH:
967 989 case DDI_CTLOPS_DETACH:
968 990 return (DDI_SUCCESS);
969 991
970 992 default:
971 993 return (ddi_ctlops(dip, rdip, op, arg, result));
972 994 }
973 995 }
974 996
975 997 static int
976 998 t4_bus_config(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op, void *arg,
977 999 dev_info_t **cdipp)
978 1000 {
979 1001 int instance, i;
980 1002 struct adapter *sc;
981 1003
982 1004 instance = ddi_get_instance(dip);
983 1005 sc = ddi_get_soft_state(t4_list, instance);
984 1006
985 1007 if (op == BUS_CONFIG_ONE) {
986 1008 char *c;
987 1009
988 1010 /*
989 1011 * arg is something like "cxgb@0" where 0 is the port_id hanging
990 1012 * off this nexus.
991 1013 */
992 1014
993 1015 c = arg;
994 1016 while (*(c + 1))
995 1017 c++;
996 1018
997 1019 /* There should be exactly 1 digit after '@' */
998 1020 if (*(c - 1) != '@')
999 1021 return (NDI_FAILURE);
1000 1022
1001 1023 i = *c - '0';
1002 1024
1003 1025 if (add_child_node(sc, i) != 0)
1004 1026 return (NDI_FAILURE);
1005 1027
1006 1028 flags |= NDI_ONLINE_ATTACH;
1007 1029
1008 1030 } else if (op == BUS_CONFIG_ALL || op == BUS_CONFIG_DRIVER) {
1009 1031 /* Allocate and bind all child device nodes */
1010 1032 for_each_port(sc, i)
1011 1033 (void) add_child_node(sc, i);
1012 1034 flags |= NDI_ONLINE_ATTACH;
1013 1035 }
1014 1036
1015 1037 return (ndi_busop_bus_config(dip, flags, op, arg, cdipp, 0));
1016 1038 }
1017 1039
1018 1040 static int
1019 1041 t4_bus_unconfig(dev_info_t *dip, uint_t flags, ddi_bus_config_op_t op,
1020 1042 void *arg)
1021 1043 {
1022 1044 int instance, i, rc;
1023 1045 struct adapter *sc;
1024 1046
1025 1047 instance = ddi_get_instance(dip);
1026 1048 sc = ddi_get_soft_state(t4_list, instance);
1027 1049
1028 1050 if (op == BUS_CONFIG_ONE || op == BUS_UNCONFIG_ALL ||
1029 1051 op == BUS_UNCONFIG_DRIVER)
1030 1052 flags |= NDI_UNCONFIG;
1031 1053
1032 1054 rc = ndi_busop_bus_unconfig(dip, flags, op, arg);
1033 1055 if (rc != 0)
1034 1056 return (rc);
1035 1057
1036 1058 if (op == BUS_UNCONFIG_ONE) {
1037 1059 char *c;
1038 1060
1039 1061 c = arg;
1040 1062 while (*(c + 1))
1041 1063 c++;
1042 1064
1043 1065 if (*(c - 1) != '@')
1044 1066 return (NDI_SUCCESS);
1045 1067
1046 1068 i = *c - '0';
1047 1069
1048 1070 rc = remove_child_node(sc, i);
1049 1071
1050 1072 } else if (op == BUS_UNCONFIG_ALL || op == BUS_UNCONFIG_DRIVER) {
1051 1073
1052 1074 for_each_port(sc, i)
1053 1075 (void) remove_child_node(sc, i);
1054 1076 }
1055 1077
1056 1078 return (rc);
1057 1079 }
1058 1080
1059 1081 /* ARGSUSED */
1060 1082 static int
1061 1083 t4_cb_open(dev_t *devp, int flag, int otyp, cred_t *credp)
1062 1084 {
1063 1085 struct adapter *sc;
1064 1086
1065 1087 if (otyp != OTYP_CHR)
1066 1088 return (EINVAL);
1067 1089
1068 1090 sc = ddi_get_soft_state(t4_list, getminor(*devp));
1069 1091 if (sc == NULL)
1070 1092 return (ENXIO);
1071 1093
1072 1094 return (atomic_cas_uint(&sc->open, 0, EBUSY));
1073 1095 }
1074 1096
1075 1097 /* ARGSUSED */
1076 1098 static int
1077 1099 t4_cb_close(dev_t dev, int flag, int otyp, cred_t *credp)
1078 1100 {
1079 1101 struct adapter *sc;
1080 1102
1081 1103 sc = ddi_get_soft_state(t4_list, getminor(dev));
1082 1104 if (sc == NULL)
1083 1105 return (EINVAL);
1084 1106
1085 1107 (void) atomic_swap_uint(&sc->open, 0);
1086 1108 return (0);
1087 1109 }
1088 1110
1089 1111 /* ARGSUSED */
1090 1112 static int
1091 1113 t4_cb_ioctl(dev_t dev, int cmd, intptr_t d, int mode, cred_t *credp, int *rp)
1092 1114 {
1093 1115 int instance;
1094 1116 struct adapter *sc;
1095 1117 void *data = (void *)d;
1096 1118
1097 1119 if (crgetuid(credp) != 0)
1098 1120 return (EPERM);
1099 1121
1100 1122 instance = getminor(dev);
1101 1123 sc = ddi_get_soft_state(t4_list, instance);
1102 1124 if (sc == NULL)
1103 1125 return (EINVAL);
1104 1126
1105 1127 return (t4_ioctl(sc, cmd, data, mode));
1106 1128 }
1107 1129
1108 1130 static unsigned int
1109 1131 getpf(struct adapter *sc)
1110 1132 {
1111 1133 int rc, *data;
1112 1134 uint_t n, pf;
1113 1135
1114 1136 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1115 1137 DDI_PROP_DONTPASS, "reg", &data, &n);
1116 1138 if (rc != DDI_SUCCESS) {
1117 1139 cxgb_printf(sc->dip, CE_WARN,
1118 1140 "failed to lookup \"reg\" property: %d", rc);
1119 1141 return (0xff);
1120 1142 }
1121 1143
1122 1144 pf = PCI_REG_FUNC_G(data[0]);
1123 1145 ddi_prop_free(data);
1124 1146
1125 1147 return (pf);
1126 1148 }
1127 1149
1128 1150
1129 1151 static struct fw_info *
1130 1152 find_fw_info(int chip)
1131 1153 {
1132 1154 u32 i;
1133 1155
1134 1156 fi[0].chip = CHELSIO_T4;
1135 1157 fi[0].fw_hdr.chip = FW_HDR_CHIP_T4;
1136 1158 fi[0].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T4));
1137 1159 fi[0].fw_hdr.intfver_nic = FW_INTFVER(T4, NIC);
1138 1160 fi[0].fw_hdr.intfver_vnic = FW_INTFVER(T4, VNIC);
1139 1161 fi[0].fw_hdr.intfver_ofld = FW_INTFVER(T4, OFLD);
1140 1162 fi[0].fw_hdr.intfver_ri = FW_INTFVER(T4, RI);
1141 1163 fi[0].fw_hdr.intfver_iscsipdu = FW_INTFVER(T4, ISCSIPDU);
1142 1164 fi[0].fw_hdr.intfver_iscsi = FW_INTFVER(T4, ISCSI);
1143 1165 fi[0].fw_hdr.intfver_fcoepdu = FW_INTFVER(T4, FCOEPDU);
1144 1166 fi[0].fw_hdr.intfver_fcoe = FW_INTFVER(T4, FCOE);
1145 1167
1146 1168 fi[1].chip = CHELSIO_T5;
1147 1169 fi[1].fw_hdr.chip = FW_HDR_CHIP_T5;
1148 1170 fi[1].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T5));
1149 1171 fi[1].fw_hdr.intfver_nic = FW_INTFVER(T5, NIC);
1150 1172 fi[1].fw_hdr.intfver_vnic = FW_INTFVER(T5, VNIC);
1151 1173 fi[1].fw_hdr.intfver_ofld = FW_INTFVER(T5, OFLD);
1152 1174 fi[1].fw_hdr.intfver_ri = FW_INTFVER(T5, RI);
1153 1175 fi[1].fw_hdr.intfver_iscsipdu = FW_INTFVER(T5, ISCSIPDU);
1154 1176 fi[1].fw_hdr.intfver_iscsi = FW_INTFVER(T5, ISCSI);
1155 1177 fi[1].fw_hdr.intfver_fcoepdu = FW_INTFVER(T5, FCOEPDU);
1156 1178 fi[1].fw_hdr.intfver_fcoe = FW_INTFVER(T5, FCOE);
1157 1179
1158 1180 fi[2].chip = CHELSIO_T6;
1159 1181 fi[2].fw_hdr.chip = FW_HDR_CHIP_T6;
1160 1182 fi[2].fw_hdr.fw_ver = cpu_to_be32(FW_VERSION(T6));
1161 1183 fi[2].fw_hdr.intfver_nic = FW_INTFVER(T6, NIC);
1162 1184 fi[2].fw_hdr.intfver_vnic = FW_INTFVER(T6, VNIC);
1163 1185 fi[2].fw_hdr.intfver_ofld = FW_INTFVER(T6, OFLD);
1164 1186 fi[2].fw_hdr.intfver_ri = FW_INTFVER(T6, RI);
1165 1187 fi[2].fw_hdr.intfver_iscsipdu = FW_INTFVER(T6, ISCSIPDU);
1166 1188 fi[2].fw_hdr.intfver_iscsi = FW_INTFVER(T6, ISCSI);
1167 1189 fi[2].fw_hdr.intfver_fcoepdu = FW_INTFVER(T6, FCOEPDU);
1168 1190 fi[2].fw_hdr.intfver_fcoe = FW_INTFVER(T6, FCOE);
1169 1191
1170 1192 for (i = 0; i < ARRAY_SIZE(fi); i++) {
1171 1193 if (fi[i].chip == chip)
1172 1194 return &fi[i];
1173 1195 }
1174 1196
1175 1197 return NULL;
1176 1198 }
1177 1199
1178 1200 /*
1179 1201 * Install a compatible firmware (if required), establish contact with it,
1180 1202 * become the master, and reset the device.
1181 1203 */
1182 1204 static int
1183 1205 prep_firmware(struct adapter *sc)
1184 1206 {
1185 1207 int rc;
1186 1208 int fw_size;
1187 1209 int reset = 1;
1188 1210 enum dev_state state;
1189 1211 unsigned char *fw_data;
1190 1212 struct fw_info *fw_info;
1191 1213 struct fw_hdr *card_fw;
1192 1214
1193 1215 struct driver_properties *p = &sc->props;
1194 1216
1195 1217 /* Contact firmware, request master */
1196 1218 rc = t4_fw_hello(sc, sc->mbox, sc->mbox, MASTER_MUST, &state);
1197 1219 if (rc < 0) {
1198 1220 rc = -rc;
1199 1221 cxgb_printf(sc->dip, CE_WARN,
1200 1222 "failed to connect to the firmware: %d.", rc);
1201 1223 return (rc);
1202 1224 }
1203 1225
1204 1226 if (rc == sc->mbox)
1205 1227 sc->flags |= MASTER_PF;
1206 1228
1207 1229 /* We may need FW version info for later reporting */
1208 1230 t4_get_version_info(sc);
1209 1231 fw_info = find_fw_info(CHELSIO_CHIP_VERSION(sc->params.chip));
1210 1232 /* allocate memory to read the header of the firmware on the
1211 1233 * card
1212 1234 */
1213 1235 if (!fw_info) {
1214 1236 cxgb_printf(sc->dip, CE_WARN,
1215 1237 "unable to look up firmware information for chip %d.\n",
1216 1238 CHELSIO_CHIP_VERSION(sc->params.chip));
1217 1239 return EINVAL;
1218 1240 }
1219 1241 card_fw = kmem_zalloc(sizeof(*card_fw), KM_SLEEP);
1220 1242 if(!card_fw) {
1221 1243 cxgb_printf(sc->dip, CE_WARN,
1222 1244 "Memory allocation for card FW header failed\n");
1223 1245 return ENOMEM;
1224 1246 }
1225 1247 switch(CHELSIO_CHIP_VERSION(sc->params.chip)) {
1226 1248 case CHELSIO_T4:
1227 1249 fw_data = t4fw_data;
1228 1250 fw_size = t4fw_size;
1229 1251 break;
1230 1252 case CHELSIO_T5:
1231 1253 fw_data = t5fw_data;
1232 1254 fw_size = t5fw_size;
1233 1255 break;
1234 1256 case CHELSIO_T6:
1235 1257 fw_data = t6fw_data;
1236 1258 fw_size = t6fw_size;
1237 1259 break;
1238 1260 default:
1239 1261 cxgb_printf(sc->dip, CE_WARN, "Adapter type not supported\n");
1240 1262 kmem_free(card_fw, sizeof(*card_fw));
1241 1263 return EINVAL;
1242 1264 }
1243 1265
1244 1266 rc = -t4_prep_fw(sc, fw_info, fw_data, fw_size, card_fw,
1245 1267 p->t4_fw_install, state, &reset);
1246 1268
1247 1269 kmem_free(card_fw, sizeof(*card_fw));
1248 1270
1249 1271 if (rc != 0) {
1250 1272 cxgb_printf(sc->dip, CE_WARN,
1251 1273 "failed to install firmware: %d", rc);
1252 1274 return (rc);
1253 1275 } else {
1254 1276 /* refresh */
1255 1277 (void) t4_check_fw_version(sc);
1256 1278 }
1257 1279
1258 1280 /* Reset device */
1259 1281 rc = -t4_fw_reset(sc, sc->mbox, F_PIORSTMODE | F_PIORST);
1260 1282 if (rc != 0) {
1261 1283 cxgb_printf(sc->dip, CE_WARN,
1262 1284 "firmware reset failed: %d.", rc);
1263 1285 if (rc != ETIMEDOUT && rc != EIO)
1264 1286 (void) t4_fw_bye(sc, sc->mbox);
1265 1287 return (rc);
1266 1288 }
1267 1289
1268 1290 /* Partition adapter resources as specified in the config file. */
1269 1291 if (sc->flags & MASTER_PF) {
1270 1292 /* Handle default vs special T4 config file */
1271 1293
1272 1294 rc = partition_resources(sc);
1273 1295 if (rc != 0)
1274 1296 goto err; /* error message displayed already */
1275 1297 }
1276 1298
1277 1299 sc->flags |= FW_OK;
1278 1300 return (0);
1279 1301 err:
1280 1302 return (rc);
1281 1303
1282 1304 }
1283 1305
1284 1306 static const struct memwin t4_memwin[] = {
1285 1307 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1286 1308 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1287 1309 { MEMWIN2_BASE, MEMWIN2_APERTURE }
1288 1310 };
1289 1311
1290 1312 static const struct memwin t5_memwin[] = {
1291 1313 { MEMWIN0_BASE, MEMWIN0_APERTURE },
1292 1314 { MEMWIN1_BASE, MEMWIN1_APERTURE },
1293 1315 { MEMWIN2_BASE_T5, MEMWIN2_APERTURE_T5 },
1294 1316 };
1295 1317
1296 1318 #define FW_PARAM_DEV(param) \
1297 1319 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_DEV) | \
1298 1320 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_DEV_##param))
1299 1321 #define FW_PARAM_PFVF(param) \
1300 1322 (V_FW_PARAMS_MNEM(FW_PARAMS_MNEM_PFVF) | \
1301 1323 V_FW_PARAMS_PARAM_X(FW_PARAMS_PARAM_PFVF_##param))
1302 1324
1303 1325 /*
1304 1326 * Verify that the memory range specified by the memtype/offset/len pair is
1305 1327 * valid and lies entirely within the memtype specified. The global address of
1306 1328 * the start of the range is returned in addr.
1307 1329 */
1308 1330 int
1309 1331 validate_mt_off_len(struct adapter *sc, int mtype, uint32_t off, int len,
1310 1332 uint32_t *addr)
1311 1333 {
1312 1334 uint32_t em, addr_len, maddr, mlen;
1313 1335
1314 1336 /* Memory can only be accessed in naturally aligned 4 byte units */
1315 1337 if (off & 3 || len & 3 || len == 0)
1316 1338 return (EINVAL);
1317 1339
1318 1340 em = t4_read_reg(sc, A_MA_TARGET_MEM_ENABLE);
1319 1341 switch (mtype) {
1320 1342 case MEM_EDC0:
1321 1343 if (!(em & F_EDRAM0_ENABLE))
1322 1344 return (EINVAL);
1323 1345 addr_len = t4_read_reg(sc, A_MA_EDRAM0_BAR);
1324 1346 maddr = G_EDRAM0_BASE(addr_len) << 20;
1325 1347 mlen = G_EDRAM0_SIZE(addr_len) << 20;
1326 1348 break;
1327 1349 case MEM_EDC1:
1328 1350 if (!(em & F_EDRAM1_ENABLE))
1329 1351 return (EINVAL);
1330 1352 addr_len = t4_read_reg(sc, A_MA_EDRAM1_BAR);
1331 1353 maddr = G_EDRAM1_BASE(addr_len) << 20;
1332 1354 mlen = G_EDRAM1_SIZE(addr_len) << 20;
1333 1355 break;
1334 1356 case MEM_MC:
1335 1357 if (!(em & F_EXT_MEM_ENABLE))
1336 1358 return (EINVAL);
1337 1359 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY_BAR);
1338 1360 maddr = G_EXT_MEM_BASE(addr_len) << 20;
1339 1361 mlen = G_EXT_MEM_SIZE(addr_len) << 20;
1340 1362 break;
1341 1363 case MEM_MC1:
1342 1364 if (is_t4(sc->params.chip) || !(em & F_EXT_MEM1_ENABLE))
1343 1365 return (EINVAL);
1344 1366 addr_len = t4_read_reg(sc, A_MA_EXT_MEMORY1_BAR);
1345 1367 maddr = G_EXT_MEM1_BASE(addr_len) << 20;
1346 1368 mlen = G_EXT_MEM1_SIZE(addr_len) << 20;
1347 1369 break;
1348 1370 default:
1349 1371 return (EINVAL);
1350 1372 }
1351 1373
1352 1374 if (mlen > 0 && off < mlen && off + len <= mlen) {
1353 1375 *addr = maddr + off; /* global address */
1354 1376 return (0);
1355 1377 }
1356 1378
1357 1379 return (EFAULT);
1358 1380 }
1359 1381
1360 1382 void
1361 1383 memwin_info(struct adapter *sc, int win, uint32_t *base, uint32_t *aperture)
1362 1384 {
1363 1385 const struct memwin *mw;
1364 1386
1365 1387 if (is_t4(sc->params.chip)) {
1366 1388 mw = &t4_memwin[win];
1367 1389 } else {
1368 1390 mw = &t5_memwin[win];
1369 1391 }
1370 1392
1371 1393 if (base != NULL)
1372 1394 *base = mw->base;
1373 1395 if (aperture != NULL)
1374 1396 *aperture = mw->aperture;
1375 1397 }
1376 1398
1377 1399 /*
1378 1400 * Upload configuration file to card's memory.
1379 1401 */
1380 1402 static int
1381 1403 upload_config_file(struct adapter *sc, uint32_t *mt, uint32_t *ma)
1382 1404 {
1383 1405 int rc = 0, cflen;
1384 1406 u_int i, n;
1385 1407 uint32_t param, val, addr, mtype, maddr;
1386 1408 uint32_t off, mw_base, mw_aperture;
1387 1409 const uint32_t *cfdata;
1388 1410
1389 1411 /* Figure out where the firmware wants us to upload it. */
1390 1412 param = FW_PARAM_DEV(CF);
1391 1413 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1392 1414 if (rc != 0) {
1393 1415 /* Firmwares without config file support will fail this way */
1394 1416 cxgb_printf(sc->dip, CE_WARN,
1395 1417 "failed to query config file location: %d.\n", rc);
1396 1418 return (rc);
1397 1419 }
1398 1420 *mt = mtype = G_FW_PARAMS_PARAM_Y(val);
1399 1421 *ma = maddr = G_FW_PARAMS_PARAM_Z(val) << 16;
1400 1422
1401 1423 switch (CHELSIO_CHIP_VERSION(sc->params.chip)) {
1402 1424 case CHELSIO_T4:
1403 1425 cflen = t4cfg_size & ~3;
1404 1426 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1405 1427 cfdata = (const uint32_t *)t4cfg_data;
1406 1428 break;
1407 1429 case CHELSIO_T5:
1408 1430 cflen = t5cfg_size & ~3;
1409 1431 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1410 1432 cfdata = (const uint32_t *)t5cfg_data;
1411 1433 break;
1412 1434 case CHELSIO_T6:
1413 1435 cflen = t6cfg_size & ~3;
1414 1436 /* LINTED: E_BAD_PTR_CAST_ALIGN */
1415 1437 cfdata = (const uint32_t *)t6cfg_data;
1416 1438 break;
1417 1439 default:
1418 1440 cxgb_printf(sc->dip, CE_WARN,
1419 1441 "Invalid Adapter detected\n");
1420 1442 return EINVAL;
1421 1443 }
1422 1444
1423 1445 if (cflen > FLASH_CFG_MAX_SIZE) {
1424 1446 cxgb_printf(sc->dip, CE_WARN,
1425 1447 "config file too long (%d, max allowed is %d). ",
1426 1448 cflen, FLASH_CFG_MAX_SIZE);
1427 1449 return (EFBIG);
1428 1450 }
1429 1451
1430 1452 rc = validate_mt_off_len(sc, mtype, maddr, cflen, &addr);
1431 1453 if (rc != 0) {
1432 1454
1433 1455 cxgb_printf(sc->dip, CE_WARN,
1434 1456 "%s: addr (%d/0x%x) or len %d is not valid: %d. "
1435 1457 "Will try to use the config on the card, if any.\n",
1436 1458 __func__, mtype, maddr, cflen, rc);
1437 1459 return (EFAULT);
1438 1460 }
1439 1461
1440 1462 memwin_info(sc, 2, &mw_base, &mw_aperture);
1441 1463 while (cflen) {
1442 1464 off = position_memwin(sc, 2, addr);
1443 1465 n = min(cflen, mw_aperture - off);
1444 1466 for (i = 0; i < n; i += 4)
1445 1467 t4_write_reg(sc, mw_base + off + i, *cfdata++);
1446 1468 cflen -= n;
1447 1469 addr += n;
1448 1470 }
1449 1471
1450 1472 return (rc);
1451 1473 }
1452 1474
1453 1475 /*
1454 1476 * Partition chip resources for use between various PFs, VFs, etc. This is done
1455 1477 * by uploading the firmware configuration file to the adapter and instructing
1456 1478 * the firmware to process it.
1457 1479 */
1458 1480 static int
1459 1481 partition_resources(struct adapter *sc)
1460 1482 {
1461 1483 int rc;
1462 1484 struct fw_caps_config_cmd caps;
1463 1485 uint32_t mtype, maddr, finicsum, cfcsum;
1464 1486
1465 1487 rc = upload_config_file(sc, &mtype, &maddr);
1466 1488 if (rc != 0) {
1467 1489 mtype = FW_MEMTYPE_CF_FLASH;
1468 1490 maddr = t4_flash_cfg_addr(sc);
1469 1491 }
1470 1492
1471 1493 bzero(&caps, sizeof (caps));
1472 1494 caps.op_to_write = BE_32(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1473 1495 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1474 1496 caps.cfvalid_to_len16 = BE_32(F_FW_CAPS_CONFIG_CMD_CFVALID |
1475 1497 V_FW_CAPS_CONFIG_CMD_MEMTYPE_CF(mtype) |
1476 1498 V_FW_CAPS_CONFIG_CMD_MEMADDR64K_CF(maddr >> 16) | FW_LEN16(caps));
1477 1499 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1478 1500 if (rc != 0) {
1479 1501 cxgb_printf(sc->dip, CE_WARN,
1480 1502 "failed to pre-process config file: %d.\n", rc);
1481 1503 return (rc);
1482 1504 }
1483 1505
1484 1506 finicsum = ntohl(caps.finicsum);
1485 1507 cfcsum = ntohl(caps.cfcsum);
1486 1508 if (finicsum != cfcsum) {
1487 1509 cxgb_printf(sc->dip, CE_WARN,
1488 1510 "WARNING: config file checksum mismatch: %08x %08x\n",
1489 1511 finicsum, cfcsum);
1490 1512 }
1491 1513 sc->cfcsum = cfcsum;
1492 1514
1493 1515 /* TODO: Need to configure this correctly */
1494 1516 caps.toecaps = htons(FW_CAPS_CONFIG_TOE);
1495 1517 caps.iscsicaps = 0;
1496 1518 caps.rdmacaps = 0;
1497 1519 caps.fcoecaps = 0;
1498 1520 /* TODO: Disable VNIC cap for now */
1499 1521 caps.niccaps ^= htons(FW_CAPS_CONFIG_NIC_VM);
1500 1522
1501 1523 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1502 1524 F_FW_CMD_REQUEST | F_FW_CMD_WRITE);
1503 1525 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1504 1526 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), NULL);
1505 1527 if (rc != 0) {
1506 1528 cxgb_printf(sc->dip, CE_WARN,
1507 1529 "failed to process config file: %d.\n", rc);
1508 1530 return (rc);
1509 1531 }
1510 1532
1511 1533 return (0);
1512 1534 }
1513 1535
1514 1536 /*
1515 1537 * Tweak configuration based on module parameters, etc. Most of these have
1516 1538 * defaults assigned to them by Firmware Configuration Files (if we're using
1517 1539 * them) but need to be explicitly set if we're using hard-coded
1518 1540 * initialization. But even in the case of using Firmware Configuration
1519 1541 * Files, we'd like to expose the ability to change these via module
1520 1542 * parameters so these are essentially common tweaks/settings for
1521 1543 * Configuration Files and hard-coded initialization ...
1522 1544 */
1523 1545 static int
1524 1546 adap__pre_init_tweaks(struct adapter *sc)
1525 1547 {
1526 1548 int rx_dma_offset = 2; /* Offset of RX packets into DMA buffers */
1527 1549
1528 1550 /*
1529 1551 * Fix up various Host-Dependent Parameters like Page Size, Cache
1530 1552 * Line Size, etc. The firmware default is for a 4KB Page Size and
1531 1553 * 64B Cache Line Size ...
1532 1554 */
1533 1555 (void) t4_fixup_host_params_compat(sc, PAGE_SIZE, CACHE_LINE, T5_LAST_REV);
1534 1556
1535 1557 t4_set_reg_field(sc, A_SGE_CONTROL,
1536 1558 V_PKTSHIFT(M_PKTSHIFT), V_PKTSHIFT(rx_dma_offset));
1537 1559
1538 1560 return 0;
1539 1561 }
1540 1562 /*
1541 1563 * Retrieve parameters that are needed (or nice to have) prior to calling
1542 1564 * t4_sge_init and t4_fw_initialize.
1543 1565 */
1544 1566 static int
1545 1567 get_params__pre_init(struct adapter *sc)
1546 1568 {
1547 1569 int rc;
1548 1570 uint32_t param[2], val[2];
1549 1571 struct fw_devlog_cmd cmd;
1550 1572 struct devlog_params *dlog = &sc->params.devlog;
1551 1573
1552 1574 /*
1553 1575 * Grab the raw VPD parameters.
1554 1576 */
1555 1577 rc = -t4_get_raw_vpd_params(sc, &sc->params.vpd);
1556 1578 if (rc != 0) {
1557 1579 cxgb_printf(sc->dip, CE_WARN,
1558 1580 "failed to query VPD parameters (pre_init): %d.\n", rc);
1559 1581 return (rc);
1560 1582 }
1561 1583
1562 1584 param[0] = FW_PARAM_DEV(PORTVEC);
1563 1585 param[1] = FW_PARAM_DEV(CCLK);
1564 1586 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 2, param, val);
1565 1587 if (rc != 0) {
1566 1588 cxgb_printf(sc->dip, CE_WARN,
1567 1589 "failed to query parameters (pre_init): %d.\n", rc);
1568 1590 return (rc);
1569 1591 }
1570 1592
1571 1593 sc->params.portvec = val[0];
1572 1594 sc->params.nports = 0;
1573 1595 while (val[0]) {
1574 1596 sc->params.nports++;
1575 1597 val[0] &= val[0] - 1;
1576 1598 }
1577 1599
1578 1600 sc->params.vpd.cclk = val[1];
1579 1601
1580 1602 /* Read device log parameters. */
1581 1603 bzero(&cmd, sizeof (cmd));
1582 1604 cmd.op_to_write = htonl(V_FW_CMD_OP(FW_DEVLOG_CMD) |
1583 1605 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1584 1606 cmd.retval_len16 = htonl(FW_LEN16(cmd));
1585 1607 rc = -t4_wr_mbox(sc, sc->mbox, &cmd, sizeof (cmd), &cmd);
1586 1608 if (rc != 0) {
1587 1609 cxgb_printf(sc->dip, CE_WARN,
1588 1610 "failed to get devlog parameters: %d.\n", rc);
1589 1611 bzero(dlog, sizeof (*dlog));
1590 1612 rc = 0; /* devlog isn't critical for device operation */
1591 1613 } else {
1592 1614 val[0] = ntohl(cmd.memtype_devlog_memaddr16_devlog);
1593 1615 dlog->memtype = G_FW_DEVLOG_CMD_MEMTYPE_DEVLOG(val[0]);
1594 1616 dlog->start = G_FW_DEVLOG_CMD_MEMADDR16_DEVLOG(val[0]) << 4;
1595 1617 dlog->size = ntohl(cmd.memsize_devlog);
1596 1618 }
1597 1619
1598 1620 return (rc);
1599 1621 }
1600 1622
1601 1623 /*
1602 1624 * Retrieve various parameters that are of interest to the driver. The device
1603 1625 * has been initialized by the firmware at this point.
1604 1626 */
1605 1627 static int
1606 1628 get_params__post_init(struct adapter *sc)
1607 1629 {
1608 1630 int rc;
1609 1631 uint32_t param[7], val[7];
1610 1632 struct fw_caps_config_cmd caps;
1611 1633
1612 1634 param[0] = FW_PARAM_PFVF(IQFLINT_START);
1613 1635 param[1] = FW_PARAM_PFVF(EQ_START);
1614 1636 param[2] = FW_PARAM_PFVF(FILTER_START);
1615 1637 param[3] = FW_PARAM_PFVF(FILTER_END);
1616 1638 param[4] = FW_PARAM_PFVF(L2T_START);
1617 1639 param[5] = FW_PARAM_PFVF(L2T_END);
1618 1640 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1619 1641 if (rc != 0) {
1620 1642 cxgb_printf(sc->dip, CE_WARN,
1621 1643 "failed to query parameters (post_init): %d.\n", rc);
1622 1644 return (rc);
1623 1645 }
1624 1646
1625 1647 /* LINTED: E_ASSIGN_NARROW_CONV */
1626 1648 sc->sge.iq_start = val[0];
1627 1649 sc->sge.eq_start = val[1];
1628 1650 sc->tids.ftid_base = val[2];
1629 1651 sc->tids.nftids = val[3] - val[2] + 1;
1630 1652 sc->vres.l2t.start = val[4];
1631 1653 sc->vres.l2t.size = val[5] - val[4] + 1;
1632 1654
1633 1655 /* get capabilities */
1634 1656 bzero(&caps, sizeof (caps));
1635 1657 caps.op_to_write = htonl(V_FW_CMD_OP(FW_CAPS_CONFIG_CMD) |
1636 1658 F_FW_CMD_REQUEST | F_FW_CMD_READ);
1637 1659 caps.cfvalid_to_len16 = htonl(FW_LEN16(caps));
1638 1660 rc = -t4_wr_mbox(sc, sc->mbox, &caps, sizeof (caps), &caps);
1639 1661 if (rc != 0) {
1640 1662 cxgb_printf(sc->dip, CE_WARN,
1641 1663 "failed to get card capabilities: %d.\n", rc);
1642 1664 return (rc);
1643 1665 }
1644 1666
1645 1667 if (caps.toecaps != 0) {
1646 1668 /* query offload-related parameters */
1647 1669 param[0] = FW_PARAM_DEV(NTID);
1648 1670 param[1] = FW_PARAM_PFVF(SERVER_START);
1649 1671 param[2] = FW_PARAM_PFVF(SERVER_END);
1650 1672 param[3] = FW_PARAM_PFVF(TDDP_START);
1651 1673 param[4] = FW_PARAM_PFVF(TDDP_END);
1652 1674 param[5] = FW_PARAM_DEV(FLOWC_BUFFIFO_SZ);
1653 1675 rc = -t4_query_params(sc, sc->mbox, sc->pf, 0, 6, param, val);
1654 1676 if (rc != 0) {
1655 1677 cxgb_printf(sc->dip, CE_WARN,
1656 1678 "failed to query TOE parameters: %d.\n", rc);
1657 1679 return (rc);
1658 1680 }
1659 1681 sc->tids.ntids = val[0];
1660 1682 sc->tids.natids = min(sc->tids.ntids / 2, MAX_ATIDS);
1661 1683 sc->tids.stid_base = val[1];
1662 1684 sc->tids.nstids = val[2] - val[1] + 1;
1663 1685 sc->vres.ddp.start = val[3];
1664 1686 sc->vres.ddp.size = val[4] - val[3] + 1;
1665 1687 sc->params.ofldq_wr_cred = val[5];
1666 1688 sc->params.offload = 1;
1667 1689 }
1668 1690
1669 1691 /* These are finalized by FW initialization, load their values now */
1670 1692 val[0] = t4_read_reg(sc, A_TP_TIMER_RESOLUTION);
1671 1693 sc->params.tp.tre = G_TIMERRESOLUTION(val[0]);
1672 1694 sc->params.tp.dack_re = G_DELAYEDACKRESOLUTION(val[0]);
1673 1695 t4_read_mtu_tbl(sc, sc->params.mtus, NULL);
1674 1696
1675 1697 return (rc);
1676 1698 }
1677 1699
1678 1700 static int
1679 1701 set_params__post_init(struct adapter *sc)
1680 1702 {
1681 1703 uint32_t param, val;
1682 1704
1683 1705 /* ask for encapsulated CPLs */
1684 1706 param = FW_PARAM_PFVF(CPLFW4MSG_ENCAP);
1685 1707 val = 1;
1686 1708 	(void)t4_set_params(sc, sc->mbox, sc->pf, 0, 1, &param, &val);
1687 1709
1688 1710 return (0);
1689 1711 }
1690 1712
1691 1713 /* TODO: verify */
1692 1714 static void
1693 1715 setup_memwin(struct adapter *sc)
1694 1716 {
1695 1717 pci_regspec_t *data;
1696 1718 int rc;
1697 1719 uint_t n;
1698 1720 uintptr_t bar0;
1699 1721 uintptr_t mem_win0_base, mem_win1_base, mem_win2_base;
1700 1722 uintptr_t mem_win2_aperture;
1701 1723
1702 1724 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, sc->dip,
1703 1725 DDI_PROP_DONTPASS, "assigned-addresses", (int **)&data, &n);
1704 1726 if (rc != DDI_SUCCESS) {
1705 1727 cxgb_printf(sc->dip, CE_WARN,
1706 1728 "failed to lookup \"assigned-addresses\" property: %d", rc);
1707 1729 return;
1708 1730 }
1709 1731 n /= sizeof (*data);
1710 1732
1711 1733 bar0 = ((uint64_t)data[0].pci_phys_mid << 32) | data[0].pci_phys_low;
1712 1734 ddi_prop_free(data);
1713 1735
1714 1736 if (is_t4(sc->params.chip)) {
1715 1737 mem_win0_base = bar0 + MEMWIN0_BASE;
1716 1738 mem_win1_base = bar0 + MEMWIN1_BASE;
1717 1739 mem_win2_base = bar0 + MEMWIN2_BASE;
1718 1740 mem_win2_aperture = MEMWIN2_APERTURE;
1719 1741 } else {
1720 1742 /* For T5, only relative offset inside the PCIe BAR is passed */
1721 1743 mem_win0_base = MEMWIN0_BASE;
1722 1744 mem_win1_base = MEMWIN1_BASE;
1723 1745 mem_win2_base = MEMWIN2_BASE_T5;
1724 1746 mem_win2_aperture = MEMWIN2_APERTURE_T5;
1725 1747 }
1726 1748
1727 1749 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 0),
1728 1750 mem_win0_base | V_BIR(0) |
1729 1751 V_WINDOW(ilog2(MEMWIN0_APERTURE) - 10));
1730 1752
1731 1753 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 1),
1732 1754 mem_win1_base | V_BIR(0) |
1733 1755 V_WINDOW(ilog2(MEMWIN1_APERTURE) - 10));
1734 1756
1735 1757 t4_write_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2),
1736 1758 mem_win2_base | V_BIR(0) |
1737 1759 V_WINDOW(ilog2(mem_win2_aperture) - 10));
1738 1760
1739 1761 /* flush */
1740 1762 (void)t4_read_reg(sc, PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_BASE_WIN, 2));
1741 1763 }
1742 1764
1743 1765 /*
1744 1766 * Positions the memory window such that it can be used to access the specified
1745 1767 * address in the chip's address space. The return value is the offset of addr
1746 1768 * from the start of the window.
1747 1769 */
1748 1770 uint32_t
1749 1771 position_memwin(struct adapter *sc, int n, uint32_t addr)
1750 1772 {
1751 1773 uint32_t start, pf;
1752 1774 uint32_t reg;
1753 1775
1754 1776 if (addr & 3) {
1755 1777 cxgb_printf(sc->dip, CE_WARN,
1756 1778 "addr (0x%x) is not at a 4B boundary.\n", addr);
1757 1779 return (EFAULT);
1758 1780 }
1759 1781
1760 1782 if (is_t4(sc->params.chip)) {
1761 1783 pf = 0;
1762 1784 start = addr & ~0xf; /* start must be 16B aligned */
1763 1785 } else {
1764 1786 pf = V_PFNUM(sc->pf);
1765 1787 start = addr & ~0x7f; /* start must be 128B aligned */
1766 1788 }
1767 1789 reg = PCIE_MEM_ACCESS_REG(A_PCIE_MEM_ACCESS_OFFSET, n);
1768 1790
1769 1791 t4_write_reg(sc, reg, start | pf);
1770 1792 (void) t4_read_reg(sc, reg);
1771 1793
1772 1794 return (addr - start);
1773 1795 }
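/*
 * A sketch of typical use (illustrative, not lifted from this driver): to
 * read a 32-bit word at chip address "addr" through window 2 on a T4, a
 * caller could do
 *
 *	off = position_memwin(sc, 2, addr);
 *	val = t4_read_reg(sc, MEMWIN2_BASE + off);
 *
 * since the T4 memory windows sit at fixed offsets inside BAR0.
 */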
1774 1796
1775 1797
1776 1798 /*
1777 1799  * Reads the named property and fills in the "data" array (which has at least
1778 1800  * "count" elements).  We first try to look up the property for our dev_t and
1779 1801  * then retry with DDI_DEV_T_ANY if it's not found.
1780 1802 *
1781 1803 * Returns non-zero if the property was found and "data" has been updated.
1782 1804 */
1783 1805 static int
1784 1806 prop_lookup_int_array(struct adapter *sc, char *name, int *data, uint_t count)
1785 1807 {
1786 1808 dev_info_t *dip = sc->dip;
1787 1809 dev_t dev = sc->dev;
1788 1810 int rc, *d;
1789 1811 uint_t i, n;
1790 1812
1791 1813 rc = ddi_prop_lookup_int_array(dev, dip, DDI_PROP_DONTPASS,
1792 1814 name, &d, &n);
1793 1815 if (rc == DDI_PROP_SUCCESS)
1794 1816 goto found;
1795 1817
1796 1818 if (rc != DDI_PROP_NOT_FOUND) {
1797 1819 cxgb_printf(dip, CE_WARN,
1798 1820 "failed to lookup property %s for minor %d: %d.",
1799 1821 name, getminor(dev), rc);
1800 1822 return (0);
1801 1823 }
1802 1824
1803 1825 rc = ddi_prop_lookup_int_array(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1804 1826 name, &d, &n);
1805 1827 if (rc == DDI_PROP_SUCCESS)
1806 1828 goto found;
1807 1829
1808 1830 if (rc != DDI_PROP_NOT_FOUND) {
1809 1831 cxgb_printf(dip, CE_WARN,
1810 1832 "failed to lookup property %s: %d.", name, rc);
1811 1833 return (0);
1812 1834 }
1813 1835
1814 1836 return (0);
1815 1837
1816 1838 found:
1817 1839 if (n > count) {
1818 1840 cxgb_printf(dip, CE_NOTE,
1819 1841 "property %s has too many elements (%d), ignoring extras",
1820 1842 name, n);
1821 1843 }
1822 1844
1823 1845 for (i = 0; i < n && i < count; i++)
1824 1846 data[i] = d[i];
1825 1847 ddi_prop_free(d);
1826 1848
1827 1849 return (1);
1828 1850 }
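/*
 * For example, an int-array property set in the driver's .conf file, such as
 * the (purely illustrative) line below, is picked up by this routine:
 *
 *	holdoff-timer-values = 1, 5, 10, 50, 100, 200;
 */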
1829 1851
1830 1852 static int
1831 1853 prop_lookup_int(struct adapter *sc, char *name, int defval)
1832 1854 {
1833 1855 int rc;
1834 1856
1835 1857 rc = ddi_prop_get_int(sc->dev, sc->dip, DDI_PROP_DONTPASS, name, -1);
1836 1858 if (rc != -1)
1837 1859 return (rc);
1838 1860
1839 1861 return (ddi_prop_get_int(DDI_DEV_T_ANY, sc->dip, DDI_PROP_DONTPASS,
1840 1862 name, defval));
1841 1863 }
1842 1864
1843 1865 static int
1844 1866 init_driver_props(struct adapter *sc, struct driver_properties *p)
1845 1867 {
1846 1868 dev_t dev = sc->dev;
1847 1869 dev_info_t *dip = sc->dip;
1848 1870 int i, *data;
1849 1871 uint_t tmr[SGE_NTIMERS] = {5, 10, 20, 50, 100, 200};
1850 1872 uint_t cnt[SGE_NCOUNTERS] = {1, 8, 16, 32}; /* 63 max */
1851 1873
1852 1874 /*
1853 1875 * Holdoff timer
1854 1876 */
1855 1877 data = &p->timer_val[0];
1856 1878 for (i = 0; i < SGE_NTIMERS; i++)
1857 1879 data[i] = tmr[i];
1858 1880 (void) prop_lookup_int_array(sc, "holdoff-timer-values", data,
1859 1881 SGE_NTIMERS);
1860 1882 for (i = 0; i < SGE_NTIMERS; i++) {
1861 1883 int limit = 200U;
1862 1884 if (data[i] > limit) {
1863 1885 cxgb_printf(dip, CE_WARN,
1864 1886 "holdoff timer %d is too high (%d), lowered to %d.",
1865 1887 i, data[i], limit);
1866 1888 data[i] = limit;
1867 1889 }
1868 1890 }
1869 1891 (void) ddi_prop_update_int_array(dev, dip, "holdoff-timer-values",
1870 1892 data, SGE_NTIMERS);
1871 1893
1872 1894 /*
1873 1895 * Holdoff packet counter
1874 1896 */
1875 1897 data = &p->counter_val[0];
1876 1898 for (i = 0; i < SGE_NCOUNTERS; i++)
1877 1899 data[i] = cnt[i];
1878 1900 (void) prop_lookup_int_array(sc, "holdoff-pkt-counter-values", data,
1879 1901 SGE_NCOUNTERS);
1880 1902 for (i = 0; i < SGE_NCOUNTERS; i++) {
1881 1903 int limit = M_THRESHOLD_0;
1882 1904 if (data[i] > limit) {
1883 1905 cxgb_printf(dip, CE_WARN,
1884 1906 "holdoff pkt-counter %d is too high (%d), "
1885 1907 "lowered to %d.", i, data[i], limit);
1886 1908 data[i] = limit;
1887 1909 }
1888 1910 }
1889 1911 (void) ddi_prop_update_int_array(dev, dip, "holdoff-pkt-counter-values",
1890 1912 data, SGE_NCOUNTERS);
1891 1913
1892 1914 /*
1893 1915 * Maximum # of tx and rx queues to use for each
1894 1916 * 100G, 40G, 25G, 10G and 1G port.
1895 1917 */
1896 1918 p->max_ntxq_10g = prop_lookup_int(sc, "max-ntxq-10G-port", 8);
1897 1919 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1898 1920 p->max_ntxq_10g);
1899 1921
1900 1922 p->max_nrxq_10g = prop_lookup_int(sc, "max-nrxq-10G-port", 8);
1901 1923 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1902 1924 p->max_nrxq_10g);
1903 1925
1904 1926 p->max_ntxq_1g = prop_lookup_int(sc, "max-ntxq-1G-port", 2);
1905 1927 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1906 1928 p->max_ntxq_1g);
1907 1929
1908 1930 p->max_nrxq_1g = prop_lookup_int(sc, "max-nrxq-1G-port", 2);
1909 1931 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1910 1932 p->max_nrxq_1g);
1911 1933
1912 1934 #ifdef TCP_OFFLOAD_ENABLE
1913 1935 p->max_nofldtxq_10g = prop_lookup_int(sc, "max-nofldtxq-10G-port", 8);
1914 1936 (void) ddi_prop_update_int(dev, dip, "max-ntxq-10G-port",
1915 1937 p->max_nofldtxq_10g);
1916 1938
1917 1939 p->max_nofldrxq_10g = prop_lookup_int(sc, "max-nofldrxq-10G-port", 2);
1918 1940 (void) ddi_prop_update_int(dev, dip, "max-nrxq-10G-port",
1919 1941 p->max_nofldrxq_10g);
1920 1942
1921 1943 p->max_nofldtxq_1g = prop_lookup_int(sc, "max-nofldtxq-1G-port", 2);
1922 1944 (void) ddi_prop_update_int(dev, dip, "max-ntxq-1G-port",
1923 1945 p->max_nofldtxq_1g);
1924 1946
1925 1947 p->max_nofldrxq_1g = prop_lookup_int(sc, "max-nofldrxq-1G-port", 1);
1926 1948 (void) ddi_prop_update_int(dev, dip, "max-nrxq-1G-port",
1927 1949 p->max_nofldrxq_1g);
1928 1950 #endif
1929 1951
1930 1952 /*
1931 1953 * Holdoff parameters for 10G and 1G ports.
1932 1954 */
1933 1955 p->tmr_idx_10g = prop_lookup_int(sc, "holdoff-timer-idx-10G", 0);
1934 1956 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-10G",
1935 1957 p->tmr_idx_10g);
1936 1958
1937 1959 p->pktc_idx_10g = prop_lookup_int(sc, "holdoff-pktc-idx-10G", 2);
1938 1960 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-10G",
1939 1961 p->pktc_idx_10g);
1940 1962
1941 1963 p->tmr_idx_1g = prop_lookup_int(sc, "holdoff-timer-idx-1G", 0);
1942 1964 (void) ddi_prop_update_int(dev, dip, "holdoff-timer-idx-1G",
1943 1965 p->tmr_idx_1g);
1944 1966
1945 1967 p->pktc_idx_1g = prop_lookup_int(sc, "holdoff-pktc-idx-1G", 2);
1946 1968 (void) ddi_prop_update_int(dev, dip, "holdoff-pktc-idx-1G",
1947 1969 p->pktc_idx_1g);
1948 1970
1949 1971 /*
1950 1972 * Size (number of entries) of each tx and rx queue.
1951 1973 */
1952 1974 i = prop_lookup_int(sc, "qsize-txq", TX_EQ_QSIZE);
1953 1975 p->qsize_txq = max(i, 128);
1954 1976 if (p->qsize_txq != i) {
1955 1977 cxgb_printf(dip, CE_WARN,
1956 1978 "using %d instead of %d as the tx queue size",
1957 1979 p->qsize_txq, i);
1958 1980 }
1959 1981 (void) ddi_prop_update_int(dev, dip, "qsize-txq", p->qsize_txq);
1960 1982
1961 1983 i = prop_lookup_int(sc, "qsize-rxq", RX_IQ_QSIZE);
1962 1984 p->qsize_rxq = max(i, 128);
1963 1985 while (p->qsize_rxq & 7)
1964 1986 p->qsize_rxq--;
1965 1987 if (p->qsize_rxq != i) {
1966 1988 cxgb_printf(dip, CE_WARN,
1967 1989 "using %d instead of %d as the rx queue size",
1968 1990 p->qsize_rxq, i);
1969 1991 }
1970 1992 (void) ddi_prop_update_int(dev, dip, "qsize-rxq", p->qsize_rxq);
1971 1993
1972 1994 /*
1973 1995 * Interrupt types allowed.
1974 1996 * Bits 0, 1, 2 = INTx, MSI, MSI-X respectively. See sys/ddi_intr.h
1975 1997 */
1976 1998 p->intr_types = prop_lookup_int(sc, "interrupt-types",
1977 1999 DDI_INTR_TYPE_MSIX | DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_FIXED);
1978 2000 (void) ddi_prop_update_int(dev, dip, "interrupt-types", p->intr_types);
1979 2001
1980 2002 /*
1981 2003 * Forwarded interrupt queues. Create this property to force the driver
1982 2004 * to use forwarded interrupt queues.
1983 2005 */
1984 2006 if (ddi_prop_exists(dev, dip, DDI_PROP_DONTPASS,
1985 2007 "interrupt-forwarding") != 0 ||
1986 2008 ddi_prop_exists(DDI_DEV_T_ANY, dip, DDI_PROP_DONTPASS,
1987 2009 "interrupt-forwarding") != 0) {
1988 2010 UNIMPLEMENTED();
1989 2011 (void) ddi_prop_create(dev, dip, DDI_PROP_CANSLEEP,
1990 2012 "interrupt-forwarding", NULL, 0);
1991 2013 }
1992 2014
1993 2015 /*
1994 2016 * Write combining
1995 2017 * 0 to disable, 1 to enable
1996 2018 */
1997 2019 p->wc = prop_lookup_int(sc, "write-combine", 1);
1998 2020 	cxgb_printf(dip, CE_WARN, "write-combine: using %d", p->wc);
1999 2021 if (p->wc != 0 && p->wc != 1) {
2000 2022 cxgb_printf(dip, CE_WARN,
2001 2023 "write-combine: using 1 instead of %d", p->wc);
2002 2024 p->wc = 1;
2003 2025 }
2004 2026 (void) ddi_prop_update_int(dev, dip, "write-combine", p->wc);
2005 2027
2006 2028 p->t4_fw_install = prop_lookup_int(sc, "t4_fw_install", 1);
2007 2029 if (p->t4_fw_install != 0 && p->t4_fw_install != 2)
2008 2030 p->t4_fw_install = 1;
2009 2031 (void) ddi_prop_update_int(dev, dip, "t4_fw_install", p->t4_fw_install);
2010 2032
2011 2033 /* Multiple Rings */
2012 2034 p->multi_rings = prop_lookup_int(sc, "multi-rings", 1);
2013 2035 if (p->multi_rings != 0 && p->multi_rings != 1) {
2014 2036 cxgb_printf(dip, CE_NOTE,
2015 2037 "multi-rings: using value 1 instead of %d", p->multi_rings);
2016 2038 p->multi_rings = 1;
2017 2039 }
2018 2040
2019 2041 (void) ddi_prop_update_int(dev, dip, "multi-rings", p->multi_rings);
2020 2042
2021 2043 return (0);
2022 2044 }
2023 2045
2024 2046 static int
2025 2047 remove_extra_props(struct adapter *sc, int n10g, int n1g)
2026 2048 {
2027 2049 if (n10g == 0) {
2028 2050 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-10G-port");
2029 2051 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-10G-port");
2030 2052 (void) ddi_prop_remove(sc->dev, sc->dip,
2031 2053 "holdoff-timer-idx-10G");
2032 2054 (void) ddi_prop_remove(sc->dev, sc->dip,
2033 2055 "holdoff-pktc-idx-10G");
2034 2056 }
2035 2057
2036 2058 if (n1g == 0) {
2037 2059 (void) ddi_prop_remove(sc->dev, sc->dip, "max-ntxq-1G-port");
2038 2060 (void) ddi_prop_remove(sc->dev, sc->dip, "max-nrxq-1G-port");
2039 2061 (void) ddi_prop_remove(sc->dev, sc->dip,
2040 2062 "holdoff-timer-idx-1G");
2041 2063 (void) ddi_prop_remove(sc->dev, sc->dip, "holdoff-pktc-idx-1G");
2042 2064 }
2043 2065
2044 2066 return (0);
2045 2067 }
2046 2068
2047 2069 static int
2048 2070 cfg_itype_and_nqueues(struct adapter *sc, int n10g, int n1g,
2049 2071 struct intrs_and_queues *iaq)
2050 2072 {
2051 2073 struct driver_properties *p = &sc->props;
2052 2074 int rc, itype, itypes, navail, nc, nrxq10g, nrxq1g, n;
2053 2075 int nofldrxq10g = 0, nofldrxq1g = 0;
2054 2076
2055 2077 bzero(iaq, sizeof (*iaq));
2056 2078 nc = ncpus; /* our snapshot of the number of CPUs */
2057 2079 iaq->ntxq10g = min(nc, p->max_ntxq_10g);
2058 2080 iaq->ntxq1g = min(nc, p->max_ntxq_1g);
2059 2081 iaq->nrxq10g = nrxq10g = min(nc, p->max_nrxq_10g);
2060 2082 iaq->nrxq1g = nrxq1g = min(nc, p->max_nrxq_1g);
2061 2083 #ifdef TCP_OFFLOAD_ENABLE
2062 2084 iaq->nofldtxq10g = min(nc, p->max_nofldtxq_10g);
2063 2085 iaq->nofldtxq1g = min(nc, p->max_nofldtxq_1g);
2064 2086 iaq->nofldrxq10g = nofldrxq10g = min(nc, p->max_nofldrxq_10g);
2065 2087 iaq->nofldrxq1g = nofldrxq1g = min(nc, p->max_nofldrxq_1g);
2066 2088 #endif
2067 2089
2068 2090 rc = ddi_intr_get_supported_types(sc->dip, &itypes);
2069 2091 if (rc != DDI_SUCCESS) {
2070 2092 cxgb_printf(sc->dip, CE_WARN,
2071 2093 "failed to determine supported interrupt types: %d", rc);
2072 2094 return (rc);
2073 2095 }
2074 2096
2075 2097 for (itype = DDI_INTR_TYPE_MSIX; itype; itype >>= 1) {
2076 2098 ASSERT(itype == DDI_INTR_TYPE_MSIX ||
2077 2099 itype == DDI_INTR_TYPE_MSI ||
2078 2100 itype == DDI_INTR_TYPE_FIXED);
2079 2101
2080 2102 if ((itype & itypes & p->intr_types) == 0)
2081 2103 continue; /* not supported or not allowed */
2082 2104
2083 2105 navail = 0;
2084 2106 rc = ddi_intr_get_navail(sc->dip, itype, &navail);
2085 2107 if (rc != DDI_SUCCESS || navail == 0) {
2086 2108 cxgb_printf(sc->dip, CE_WARN,
2087 2109 "failed to get # of interrupts for type %d: %d",
2088 2110 itype, rc);
2089 2111 continue; /* carry on */
2090 2112 }
2091 2113
2092 2114 iaq->intr_type = itype;
2093 2115 if (navail == 0)
2094 2116 continue;
2095 2117
2096 2118 /*
2097 2119 * Best option: an interrupt vector for errors, one for the
2098 2120 		 * firmware event queue, and one for each rxq (NIC as well
2099 2121 * as offload).
2100 2122 */
2101 2123 iaq->nirq = T4_EXTRA_INTR;
2102 2124 iaq->nirq += n10g * (nrxq10g + nofldrxq10g);
2103 2125 iaq->nirq += n1g * (nrxq1g + nofldrxq1g);
2104 2126
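		/*
		 * MSI can only be granted a power-of-two number of vectors,
		 * hence the ISP2() requirement for that interrupt type.
		 */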
2105 2127 if (iaq->nirq <= navail &&
2106 2128 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2107 2129 iaq->intr_fwd = 0;
2108 2130 goto allocate;
2109 2131 }
2110 2132
2111 2133 /*
2112 2134 * Second best option: an interrupt vector for errors, one for
2113 2135 * the firmware event queue, and one each for either NIC or
2114 2136 * offload rxq's.
2115 2137 */
2116 2138 iaq->nirq = T4_EXTRA_INTR;
2117 2139 iaq->nirq += n10g * max(nrxq10g, nofldrxq10g);
2118 2140 iaq->nirq += n1g * max(nrxq1g, nofldrxq1g);
2119 2141 if (iaq->nirq <= navail &&
2120 2142 (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq))) {
2121 2143 iaq->intr_fwd = 1;
2122 2144 goto allocate;
2123 2145 }
2124 2146
2125 2147 /*
2126 2148 * Next best option: an interrupt vector for errors, one for the
2127 2149 * firmware event queue, and at least one per port. At this
2128 2150 * point we know we'll have to downsize nrxq or nofldrxq to fit
2129 2151 * what's available to us.
2130 2152 */
2131 2153 iaq->nirq = T4_EXTRA_INTR;
2132 2154 iaq->nirq += n10g + n1g;
2133 2155 if (iaq->nirq <= navail) {
2134 2156 int leftover = navail - iaq->nirq;
2135 2157
2136 2158 if (n10g > 0) {
2137 2159 int target = max(nrxq10g, nofldrxq10g);
2138 2160
2139 2161 n = 1;
2140 2162 while (n < target && leftover >= n10g) {
2141 2163 leftover -= n10g;
2142 2164 iaq->nirq += n10g;
2143 2165 n++;
2144 2166 }
2145 2167 iaq->nrxq10g = min(n, nrxq10g);
2146 2168 #ifdef TCP_OFFLOAD_ENABLE
2147 2169 iaq->nofldrxq10g = min(n, nofldrxq10g);
2148 2170 #endif
2149 2171 }
2150 2172
2151 2173 if (n1g > 0) {
2152 2174 int target = max(nrxq1g, nofldrxq1g);
2153 2175
2154 2176 n = 1;
2155 2177 while (n < target && leftover >= n1g) {
2156 2178 leftover -= n1g;
2157 2179 iaq->nirq += n1g;
2158 2180 n++;
2159 2181 }
2160 2182 iaq->nrxq1g = min(n, nrxq1g);
2161 2183 #ifdef TCP_OFFLOAD_ENABLE
2162 2184 iaq->nofldrxq1g = min(n, nofldrxq1g);
2163 2185 #endif
2164 2186 }
2165 2187
2166 2188 			/*
2167 2189 			 * This is the minimum vector count that still gives
2168 2190 			 * one interrupt per queue (NIC or offload).  Without
2169 2191 			 * offload each NIC queue gets its own vector; with
2170 2192 			 * offload a vector is shared by a NIC/offload queue
2171 2193 			 * pair, so enable interrupt forwarding only in the
2172 2194 			 * offload case.
2173 2195 			 */
2173 2195 #ifdef TCP_OFFLOAD_ENABLE
2174 2196 if (itype != DDI_INTR_TYPE_MSI || ISP2(iaq->nirq)) {
2175 2197 iaq->intr_fwd = 1;
2176 2198 #else
2177 2199 if (itype != DDI_INTR_TYPE_MSI) {
2178 2200 #endif
2179 2201 goto allocate;
2180 2202 }
2181 2203 }
2182 2204
2183 2205 /*
2184 2206 * Least desirable option: one interrupt vector for everything.
2185 2207 */
2186 2208 iaq->nirq = iaq->nrxq10g = iaq->nrxq1g = 1;
2187 2209 #ifdef TCP_OFFLOAD_ENABLE
2188 2210 iaq->nofldrxq10g = iaq->nofldrxq1g = 1;
2189 2211 #endif
2190 2212 iaq->intr_fwd = 1;
2191 2213
2192 2214 allocate:
2193 2215 return (0);
2194 2216 }
2195 2217
2196 2218 cxgb_printf(sc->dip, CE_WARN,
2197 2219 "failed to find a usable interrupt type. supported=%d, allowed=%d",
2198 2220 itypes, p->intr_types);
2199 2221 return (DDI_FAILURE);
2200 2222 }
2201 2223
2202 2224 static int
2203 2225 add_child_node(struct adapter *sc, int idx)
2204 2226 {
2205 2227 int rc;
2206 2228 struct port_info *pi;
2207 2229
2208 2230 if (idx < 0 || idx >= sc->params.nports)
2209 2231 return (EINVAL);
2210 2232
2211 2233 pi = sc->port[idx];
2212 2234 if (pi == NULL)
2213 2235 return (ENODEV); /* t4_port_init failed earlier */
2214 2236
2215 2237 PORT_LOCK(pi);
2216 2238 if (pi->dip != NULL) {
2217 2239 rc = 0; /* EEXIST really, but then bus_config fails */
2218 2240 goto done;
2219 2241 }
2220 2242
2221 2243 rc = ndi_devi_alloc(sc->dip, T4_PORT_NAME, DEVI_SID_NODEID, &pi->dip);
2222 2244 if (rc != DDI_SUCCESS || pi->dip == NULL) {
2223 2245 rc = ENOMEM;
2224 2246 goto done;
2225 2247 }
2226 2248
2227 2249 (void) ddi_set_parent_data(pi->dip, pi);
2228 2250 (void) ndi_devi_bind_driver(pi->dip, 0);
2229 2251 rc = 0;
2230 2252 done:
2231 2253 PORT_UNLOCK(pi);
2232 2254 return (rc);
2233 2255 }
2234 2256
2235 2257 static int
2236 2258 remove_child_node(struct adapter *sc, int idx)
2237 2259 {
2238 2260 int rc;
2239 2261 struct port_info *pi;
2240 2262
2241 2263 if (idx < 0 || idx >= sc->params.nports)
2242 2264 return (EINVAL);
2243 2265
2244 2266 pi = sc->port[idx];
2245 2267 if (pi == NULL)
2246 2268 return (ENODEV);
2247 2269
2248 2270 PORT_LOCK(pi);
2249 2271 if (pi->dip == NULL) {
2250 2272 rc = ENODEV;
2251 2273 goto done;
2252 2274 }
2253 2275
2254 2276 rc = ndi_devi_free(pi->dip);
2255 2277 if (rc == 0)
2256 2278 pi->dip = NULL;
2257 2279 done:
2258 2280 PORT_UNLOCK(pi);
2259 2281 return (rc);
2260 2282 }
2261 2283
2262 2284 #define KS_UINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_ULONG)
2263 2285 #define KS_CINIT(x) kstat_named_init(&kstatp->x, #x, KSTAT_DATA_CHAR)
2264 2286 #define KS_U_SET(x, y) kstatp->x.value.ul = (y)
2265 2287 #define KS_C_SET(x, ...) \
2266 2288 (void) snprintf(kstatp->x.value.c, 16, __VA_ARGS__)
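/*
 * KS_UINIT/KS_CINIT declare a named kstat of unsigned-long or character
 * type; KS_U_SET/KS_C_SET fill in its value.  The length of 16 passed to
 * snprintf() matches the size of a kstat_named_t's embedded char array.
 */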
2267 2289
2268 2290 /*
2269 2291 * t4nex:X:config
2270 2292 */
2271 2293 struct t4_kstats {
2272 2294 kstat_named_t chip_ver;
2273 2295 kstat_named_t fw_vers;
2274 2296 kstat_named_t tp_vers;
2275 2297 kstat_named_t driver_version;
2276 2298 kstat_named_t serial_number;
2277 2299 kstat_named_t ec_level;
2278 2300 kstat_named_t id;
2279 2301 kstat_named_t bus_type;
2280 2302 kstat_named_t bus_width;
2281 2303 kstat_named_t bus_speed;
2282 2304 kstat_named_t core_clock;
2283 2305 kstat_named_t port_cnt;
2284 2306 kstat_named_t port_type;
2285 2307 kstat_named_t pci_vendor_id;
2286 2308 kstat_named_t pci_device_id;
2287 2309 };
2288 2310 static kstat_t *
2289 2311 setup_kstats(struct adapter *sc)
2290 2312 {
2291 2313 kstat_t *ksp;
2292 2314 struct t4_kstats *kstatp;
2293 2315 int ndata;
2294 2316 struct pci_params *p = &sc->params.pci;
2295 2317 struct vpd_params *v = &sc->params.vpd;
2296 2318 uint16_t pci_vendor, pci_device;
2297 2319
2298 2320 ndata = sizeof (struct t4_kstats) / sizeof (kstat_named_t);
2299 2321
2300 2322 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "config",
2301 2323 "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2302 2324 if (ksp == NULL) {
2303 2325 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2304 2326 return (NULL);
2305 2327 }
2306 2328
2307 2329 kstatp = (struct t4_kstats *)ksp->ks_data;
2308 2330
2309 2331 KS_UINIT(chip_ver);
2310 2332 KS_CINIT(fw_vers);
2311 2333 KS_CINIT(tp_vers);
2312 2334 KS_CINIT(driver_version);
2313 2335 KS_CINIT(serial_number);
2314 2336 KS_CINIT(ec_level);
2315 2337 KS_CINIT(id);
2316 2338 KS_CINIT(bus_type);
2317 2339 KS_CINIT(bus_width);
2318 2340 KS_CINIT(bus_speed);
2319 2341 KS_UINIT(core_clock);
2320 2342 KS_UINIT(port_cnt);
2321 2343 KS_CINIT(port_type);
2322 2344 KS_CINIT(pci_vendor_id);
2323 2345 KS_CINIT(pci_device_id);
2324 2346
2325 2347 KS_U_SET(chip_ver, sc->params.chip);
2326 2348 KS_C_SET(fw_vers, "%d.%d.%d.%d",
2327 2349 G_FW_HDR_FW_VER_MAJOR(sc->params.fw_vers),
2328 2350 G_FW_HDR_FW_VER_MINOR(sc->params.fw_vers),
2329 2351 G_FW_HDR_FW_VER_MICRO(sc->params.fw_vers),
2330 2352 G_FW_HDR_FW_VER_BUILD(sc->params.fw_vers));
2331 2353 KS_C_SET(tp_vers, "%d.%d.%d.%d",
2332 2354 G_FW_HDR_FW_VER_MAJOR(sc->params.tp_vers),
2333 2355 G_FW_HDR_FW_VER_MINOR(sc->params.tp_vers),
2334 2356 G_FW_HDR_FW_VER_MICRO(sc->params.tp_vers),
2335 2357 G_FW_HDR_FW_VER_BUILD(sc->params.tp_vers));
2336 2358 KS_C_SET(driver_version, DRV_VERSION);
2337 2359 KS_C_SET(serial_number, "%s", v->sn);
2338 2360 KS_C_SET(ec_level, "%s", v->ec);
2339 2361 KS_C_SET(id, "%s", v->id);
2340 2362 KS_C_SET(bus_type, "pci-express");
2341 2363 KS_C_SET(bus_width, "x%d lanes", p->width);
2342 2364 KS_C_SET(bus_speed, "%d", p->speed);
2343 2365 KS_U_SET(core_clock, v->cclk);
2344 2366 KS_U_SET(port_cnt, sc->params.nports);
2345 2367
2346 2368 t4_os_pci_read_cfg2(sc, PCI_CONF_VENID, &pci_vendor);
2347 2369 KS_C_SET(pci_vendor_id, "0x%x", pci_vendor);
2348 2370
2349 2371 t4_os_pci_read_cfg2(sc, PCI_CONF_DEVID, &pci_device);
2350 2372 KS_C_SET(pci_device_id, "0x%x", pci_device);
2351 2373
2352 2374 KS_C_SET(port_type, "%s/%s/%s/%s",
2353 2375 print_port_speed(sc->port[0]),
2354 2376 print_port_speed(sc->port[1]),
2355 2377 print_port_speed(sc->port[2]),
2356 2378 print_port_speed(sc->port[3]));
2357 2379
2358 2380 /* Do NOT set ksp->ks_update. These kstats do not change. */
2359 2381
2360 2382 /* Install the kstat */
2361 2383 ksp->ks_private = (void *)sc;
2362 2384 kstat_install(ksp);
2363 2385
2364 2386 return (ksp);
2365 2387 }
2366 2388
2367 2389 /*
2368 2390 * t4nex:X:stat
2369 2391 */
2370 2392 struct t4_wc_kstats {
2371 2393 kstat_named_t write_coal_success;
2372 2394 kstat_named_t write_coal_failure;
2373 2395 };
2374 2396 static kstat_t *
2375 2397 setup_wc_kstats(struct adapter *sc)
2376 2398 {
2377 2399 kstat_t *ksp;
2378 2400 struct t4_wc_kstats *kstatp;
2379 2401 int ndata;
2380 2402
2381 2403 	ndata = sizeof (struct t4_wc_kstats) / sizeof (kstat_named_t);
2382 2404 ksp = kstat_create(T4_NEXUS_NAME, ddi_get_instance(sc->dip), "stats",
2383 2405 "nexus", KSTAT_TYPE_NAMED, ndata, 0);
2384 2406 if (ksp == NULL) {
2385 2407 cxgb_printf(sc->dip, CE_WARN, "failed to initialize kstats.");
2386 2408 return (NULL);
2387 2409 }
2388 2410
2389 2411 kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2390 2412
2391 2413 KS_UINIT(write_coal_success);
2392 2414 KS_UINIT(write_coal_failure);
2393 2415
2394 2416 ksp->ks_update = update_wc_kstats;
2395 2417 /* Install the kstat */
2396 2418 ksp->ks_private = (void *)sc;
2397 2419 kstat_install(ksp);
2398 2420
2399 2421 return (ksp);
2400 2422 }
2401 2423
2402 2424 static int
2403 2425 update_wc_kstats(kstat_t *ksp, int rw)
2404 2426 {
2405 2427 struct t4_wc_kstats *kstatp = (struct t4_wc_kstats *)ksp->ks_data;
2406 2428 struct adapter *sc = ksp->ks_private;
2407 2429 uint32_t wc_total, wc_success, wc_failure;
2408 2430
2409 2431 if (rw == KSTAT_WRITE)
2410 2432 return (0);
2411 2433
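	/*
	 * Write-coalescing statistics are only maintained by T5 hardware;
	 * other chips simply report zeroes.
	 */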
2412 2434 if (is_t5(sc->params.chip)) {
2413 2435 wc_total = t4_read_reg(sc, A_SGE_STAT_TOTAL);
2414 2436 wc_failure = t4_read_reg(sc, A_SGE_STAT_MATCH);
2415 2437 wc_success = wc_total - wc_failure;
2416 2438 } else {
2417 2439 wc_success = 0;
2418 2440 wc_failure = 0;
2419 2441 }
2420 2442
2421 2443 KS_U_SET(write_coal_success, wc_success);
2422 2444 KS_U_SET(write_coal_failure, wc_failure);
2423 2445
2424 2446 return (0);
2425 2447 }
2426 2448
2427 2449 int
2428 2450 adapter_full_init(struct adapter *sc)
2429 2451 {
2430 2452 int i, rc = 0;
2431 2453
2432 2454 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2433 2455
2434 2456 rc = t4_setup_adapter_queues(sc);
2435 2457 if (rc != 0)
2436 2458 goto done;
2437 2459
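	/*
	 * Enable the interrupts: with a single block-enable call if the
	 * implementation supports it, otherwise one handle at a time.
	 */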
2438 2460 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2439 2461 (void) ddi_intr_block_enable(sc->intr_handle, sc->intr_count);
2440 2462 else {
2441 2463 for (i = 0; i < sc->intr_count; i++)
2442 2464 (void) ddi_intr_enable(sc->intr_handle[i]);
2443 2465 }
2444 2466 t4_intr_enable(sc);
2445 2467 sc->flags |= FULL_INIT_DONE;
2446 2468
2447 2469 #ifdef TCP_OFFLOAD_ENABLE
2448 2470 /* TODO: wrong place to enable TOE capability */
2449 2471 if (is_offload(sc) != 0) {
2450 2472 for_each_port(sc, i) {
2451 2473 struct port_info *pi = sc->port[i];
2452 2474 rc = toe_capability(pi, 1);
2453 2475 if (rc != 0) {
2454 2476 cxgb_printf(pi->dip, CE_WARN,
2455 2477 "Failed to activate toe capability: %d",
2456 2478 rc);
2457 2479 rc = 0; /* not a fatal error */
2458 2480 }
2459 2481 }
2460 2482 }
2461 2483 #endif
2462 2484
2463 2485 done:
2464 2486 if (rc != 0)
2465 2487 (void) adapter_full_uninit(sc);
2466 2488
2467 2489 return (rc);
2468 2490 }
2469 2491
2470 2492 int
2471 2493 adapter_full_uninit(struct adapter *sc)
2472 2494 {
2473 2495 int i, rc = 0;
2474 2496
2475 2497 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2476 2498
2477 2499 if (sc->intr_cap & DDI_INTR_FLAG_BLOCK)
2478 2500 (void) ddi_intr_block_disable(sc->intr_handle, sc->intr_count);
2479 2501 else {
2480 2502 for (i = 0; i < sc->intr_count; i++)
2481 2503 (void) ddi_intr_disable(sc->intr_handle[i]);
2482 2504 }
2483 2505
2484 2506 rc = t4_teardown_adapter_queues(sc);
2485 2507 if (rc != 0)
2486 2508 return (rc);
2487 2509
2488 2510 sc->flags &= ~FULL_INIT_DONE;
2489 2511
2490 2512 return (0);
2491 2513 }
2492 2514
2493 2515 int
2494 2516 port_full_init(struct port_info *pi)
2495 2517 {
2496 2518 struct adapter *sc = pi->adapter;
2497 2519 uint16_t *rss;
2498 2520 struct sge_rxq *rxq;
2499 2521 int rc, i;
2500 2522
2501 2523 ADAPTER_LOCK_ASSERT_NOTOWNED(sc);
2502 2524 ASSERT((pi->flags & PORT_INIT_DONE) == 0);
2503 2525
2504 2526 /*
2505 2527 * Allocate tx/rx/fl queues for this port.
2506 2528 */
2507 2529 rc = t4_setup_port_queues(pi);
2508 2530 if (rc != 0)
2509 2531 goto done; /* error message displayed already */
2510 2532
2511 2533 /*
2512 2534 * Setup RSS for this port.
2513 2535 */
2514 2536 rss = kmem_zalloc(pi->nrxq * sizeof (*rss), KM_SLEEP);
2515 2537 for_each_rxq(pi, i, rxq) {
2516 2538 rss[i] = rxq->iq.abs_id;
2517 2539 }
2518 2540 rc = -t4_config_rss_range(sc, sc->mbox, pi->viid, 0,
2519 2541 pi->rss_size, rss, pi->nrxq);
2520 2542 kmem_free(rss, pi->nrxq * sizeof (*rss));
2521 2543 if (rc != 0) {
2522 2544 cxgb_printf(pi->dip, CE_WARN, "rss_config failed: %d", rc);
2523 2545 goto done;
2524 2546 }
2525 2547
2526 2548 pi->flags |= PORT_INIT_DONE;
2527 2549 done:
2528 2550 if (rc != 0)
2529 2551 (void) port_full_uninit(pi);
2530 2552
2531 2553 return (rc);
2532 2554 }
2533 2555
2534 2556 /*
2535 2557 * Idempotent.
2536 2558 */
2537 2559 int
2538 2560 port_full_uninit(struct port_info *pi)
2539 2561 {
2540 2562
2541 2563 ASSERT(pi->flags & PORT_INIT_DONE);
2542 2564
2543 2565 (void) t4_teardown_port_queues(pi);
2544 2566 pi->flags &= ~PORT_INIT_DONE;
2545 2567
2546 2568 return (0);
2547 2569 }
2548 2570
2549 2571 void
2550 2572 enable_port_queues(struct port_info *pi)
2551 2573 {
2552 2574 struct adapter *sc = pi->adapter;
2553 2575 int i;
2554 2576 struct sge_iq *iq;
2555 2577 struct sge_rxq *rxq;
2556 2578 #ifdef TCP_OFFLOAD_ENABLE
2557 2579 struct sge_ofld_rxq *ofld_rxq;
2558 2580 #endif
2559 2581
2560 2582 ASSERT(pi->flags & PORT_INIT_DONE);
2561 2583
2562 2584 /*
2563 2585 * TODO: whatever was queued up after we set iq->state to IQS_DISABLED
2564 2586 * back in disable_port_queues will be processed now, after an unbounded
2565 2587 * delay. This can't be good.
2566 2588 */
2567 2589
2568 2590 #ifdef TCP_OFFLOAD_ENABLE
2569 2591 for_each_ofld_rxq(pi, i, ofld_rxq) {
2570 2592 iq = &ofld_rxq->iq;
2571 2593 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2572 2594 IQS_DISABLED)
2573 2595 panic("%s: iq %p wasn't disabled", __func__,
2574 2596 (void *)iq);
2575 2597 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2576 2598 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2577 2599 }
2578 2600 #endif
2579 2601
2580 2602 for_each_rxq(pi, i, rxq) {
2581 2603 iq = &rxq->iq;
2582 2604 if (atomic_cas_uint(&iq->state, IQS_DISABLED, IQS_IDLE) !=
2583 2605 IQS_DISABLED)
2584 2606 panic("%s: iq %p wasn't disabled", __func__,
2585 2607 			    (void *)iq);
2586 2608 t4_write_reg(sc, MYPF_REG(A_SGE_PF_GTS),
2587 2609 V_SEINTARM(iq->intr_params) | V_INGRESSQID(iq->cntxt_id));
2588 2610 }
2589 2611 }
2590 2612
2591 2613 void
2592 2614 disable_port_queues(struct port_info *pi)
2593 2615 {
2594 2616 int i;
2595 2617 struct adapter *sc = pi->adapter;
2596 2618 struct sge_rxq *rxq;
2597 2619 #ifdef TCP_OFFLOAD_ENABLE
2598 2620 struct sge_ofld_rxq *ofld_rxq;
2599 2621 #endif
2600 2622
2601 2623 ASSERT(pi->flags & PORT_INIT_DONE);
2602 2624
2603 2625 /*
2604 2626 * TODO: need proper implementation for all tx queues (ctrl, eth, ofld).
2605 2627 */
2606 2628
2607 2629 #ifdef TCP_OFFLOAD_ENABLE
2608 2630 for_each_ofld_rxq(pi, i, ofld_rxq) {
2609 2631 while (atomic_cas_uint(&ofld_rxq->iq.state, IQS_IDLE,
2610 2632 IQS_DISABLED) != IQS_IDLE)
2611 2633 msleep(1);
2612 2634 }
2613 2635 #endif
2614 2636
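	/*
	 * Spin until each queue's handler is out of the way: the CAS only
	 * succeeds (IQS_IDLE -> IQS_DISABLED) while no handler is running.
	 */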
2615 2637 for_each_rxq(pi, i, rxq) {
2616 2638 while (atomic_cas_uint(&rxq->iq.state, IQS_IDLE,
2617 2639 IQS_DISABLED) != IQS_IDLE)
2618 2640 msleep(1);
2619 2641 }
2620 2642
2621 2643 mutex_enter(&sc->sfl_lock);
2622 2644 #ifdef TCP_OFFLOAD_ENABLE
2623 2645 for_each_ofld_rxq(pi, i, ofld_rxq)
2624 2646 ofld_rxq->fl.flags |= FL_DOOMED;
2625 2647 #endif
2626 2648 for_each_rxq(pi, i, rxq)
2627 2649 rxq->fl.flags |= FL_DOOMED;
2628 2650 mutex_exit(&sc->sfl_lock);
2629 2651 /* TODO: need to wait for all fl's to be removed from sc->sfl */
2630 2652 }
2631 2653
2632 2654 void
2633 2655 t4_fatal_err(struct adapter *sc)
2634 2656 {
2635 2657 t4_set_reg_field(sc, A_SGE_CONTROL, F_GLOBALENABLE, 0);
2636 2658 t4_intr_disable(sc);
2637 2659 cxgb_printf(sc->dip, CE_WARN,
2638 2660 "encountered fatal error, adapter stopped.");
2639 2661 }
2640 2662
2641 2663 int
2642 2664 t4_os_find_pci_capability(struct adapter *sc, int cap)
2643 2665 {
2644 2666 uint16_t stat;
2645 2667 uint8_t cap_ptr, cap_id;
2646 2668
2647 2669 t4_os_pci_read_cfg2(sc, PCI_CONF_STAT, &stat);
2648 2670 if ((stat & PCI_STAT_CAP) == 0)
2649 2671 return (0); /* does not implement capabilities */
2650 2672
2651 2673 t4_os_pci_read_cfg1(sc, PCI_CONF_CAP_PTR, &cap_ptr);
2652 2674 while (cap_ptr) {
2653 2675 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_ID, &cap_id);
2654 2676 if (cap_id == cap)
2655 2677 return (cap_ptr); /* found */
2656 2678 t4_os_pci_read_cfg1(sc, cap_ptr + PCI_CAP_NEXT_PTR, &cap_ptr);
2657 2679 }
2658 2680
2659 2681 return (0); /* not found */
2660 2682 }
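/*
 * For example (illustrative only), the PCI Express capability block can be
 * located with t4_os_find_pci_capability(sc, PCI_CAP_ID_PCI_E).
 */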
2661 2683
2662 2684 void
2663 2685 t4_os_portmod_changed(const struct adapter *sc, int idx)
2664 2686 {
2665 2687 static const char *mod_str[] = {
2666 2688 NULL, "LR", "SR", "ER", "TWINAX", "active TWINAX", "LRM"
2667 2689 };
2668 2690 const struct port_info *pi = sc->port[idx];
2669 2691
2670 2692 if (pi->mod_type == FW_PORT_MOD_TYPE_NONE)
2671 2693 cxgb_printf(pi->dip, CE_NOTE, "transceiver unplugged.");
2672 2694 else if (pi->mod_type == FW_PORT_MOD_TYPE_UNKNOWN)
2673 2695 cxgb_printf(pi->dip, CE_NOTE,
2674 2696 "unknown transceiver inserted.\n");
2675 2697 else if (pi->mod_type == FW_PORT_MOD_TYPE_NOTSUPPORTED)
2676 2698 cxgb_printf(pi->dip, CE_NOTE,
2677 2699 "unsupported transceiver inserted.\n");
2678 2700 else if (pi->mod_type > 0 && pi->mod_type < ARRAY_SIZE(mod_str))
2679 2701 cxgb_printf(pi->dip, CE_NOTE, "%s transceiver inserted.\n",
2680 2702 mod_str[pi->mod_type]);
2681 2703 else
2682 2704 cxgb_printf(pi->dip, CE_NOTE, "transceiver (type %d) inserted.",
2683 2705 pi->mod_type);
2684 2706 }
2685 2707
2686 2708 /* ARGSUSED */
2687 2709 static int
2688 2710 cpl_not_handled(struct sge_iq *iq, const struct rss_header *rss, mblk_t *m)
2689 2711 {
2690 2712 if (m != NULL)
2691 2713 freemsg(m);
2692 2714 return (0);
2693 2715 }
2694 2716
2695 2717 int
2696 2718 t4_register_cpl_handler(struct adapter *sc, int opcode, cpl_handler_t h)
2697 2719 {
2698 2720 	cpl_handler_t *loc, new;
2699 2721
2700 2722 if (opcode >= ARRAY_SIZE(sc->cpl_handler))
2701 2723 return (EINVAL);
2702 2724
2703 2725 	new = h ? h : cpl_not_handled;
2704 2726 	loc = &sc->cpl_handler[opcode];
2705 2727 	(void) atomic_swap_ptr(loc, (void *)new);
2706 2728
2707 2729 return (0);
2708 2730 }
2709 2731
2710 2732 static int
2711 2733 fw_msg_not_handled(struct adapter *sc, const __be64 *data)
2712 2734 {
2713 2735 struct cpl_fw6_msg *cpl = container_of(data, struct cpl_fw6_msg, data);
2714 2736
2715 2737 cxgb_printf(sc->dip, CE_WARN, "%s fw_msg type %d", __func__, cpl->type);
2716 2738 return (0);
2717 2739 }
2718 2740
2719 2741 int
2720 2742 t4_register_fw_msg_handler(struct adapter *sc, int type, fw_msg_handler_t h)
2721 2743 {
2722 2744 fw_msg_handler_t *loc, new;
2723 2745
2724 2746 if (type >= ARRAY_SIZE(sc->fw_msg_handler))
2725 2747 return (EINVAL);
2726 2748
2727 2749 /*
2728 2750 * These are dispatched by the handler for FW{4|6}_CPL_MSG using the CPL
2729 2751 * handler dispatch table. Reject any attempt to install a handler for
2730 2752 * this subtype.
2731 2753 */
2732 2754 if (type == FW_TYPE_RSSCPL || type == FW6_TYPE_RSSCPL)
2733 2755 return (EINVAL);
2734 2756
2735 2757 new = h ? h : fw_msg_not_handled;
2736 2758 loc = &sc->fw_msg_handler[type];
2737 2759 (void)atomic_swap_ptr(loc, (void *)new);
2738 2760
2739 2761 return (0);
2740 2762 }
2741 2763
2742 2764 #ifdef TCP_OFFLOAD_ENABLE
2743 2765 static int
2744 2766 toe_capability(struct port_info *pi, int enable)
2745 2767 {
2746 2768 int rc;
2747 2769 struct adapter *sc = pi->adapter;
2748 2770
2749 2771 if (!is_offload(sc))
2750 2772 return (ENODEV);
2751 2773
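	/*
	 * offload_map tracks which ports have TOE enabled: the TOM ULD is
	 * activated when the first port enables it and deactivated when the
	 * last port disables it.
	 */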
2752 2774 if (enable != 0) {
2753 2775 if (isset(&sc->offload_map, pi->port_id) != 0)
2754 2776 return (0);
2755 2777
2756 2778 if (sc->offload_map == 0) {
2757 2779 rc = activate_uld(sc, ULD_TOM, &sc->tom);
2758 2780 if (rc != 0)
2759 2781 return (rc);
2760 2782 }
2761 2783
2762 2784 setbit(&sc->offload_map, pi->port_id);
2763 2785 } else {
2764 2786 if (!isset(&sc->offload_map, pi->port_id))
2765 2787 return (0);
2766 2788
2767 2789 clrbit(&sc->offload_map, pi->port_id);
2768 2790
2769 2791 if (sc->offload_map == 0) {
2770 2792 rc = deactivate_uld(&sc->tom);
2771 2793 if (rc != 0) {
2772 2794 setbit(&sc->offload_map, pi->port_id);
2773 2795 return (rc);
2774 2796 }
2775 2797 }
2776 2798 }
2777 2799
2778 2800 return (0);
2779 2801 }
2780 2802
2781 2803 /*
2782 2804 * Add an upper layer driver to the global list.
2783 2805 */
2784 2806 int
2785 2807 t4_register_uld(struct uld_info *ui)
2786 2808 {
2787 2809 int rc = 0;
2788 2810 struct uld_info *u;
2789 2811
2790 2812 mutex_enter(&t4_uld_list_lock);
2791 2813 SLIST_FOREACH(u, &t4_uld_list, link) {
2792 2814 if (u->uld_id == ui->uld_id) {
2793 2815 rc = EEXIST;
2794 2816 goto done;
2795 2817 }
2796 2818 }
2797 2819
2798 2820 SLIST_INSERT_HEAD(&t4_uld_list, ui, link);
2799 2821 ui->refcount = 0;
2800 2822 done:
2801 2823 mutex_exit(&t4_uld_list_lock);
2802 2824 return (rc);
2803 2825 }
2804 2826
2805 2827 int
2806 2828 t4_unregister_uld(struct uld_info *ui)
2807 2829 {
2808 2830 int rc = EINVAL;
2809 2831 struct uld_info *u;
2810 2832
2811 2833 mutex_enter(&t4_uld_list_lock);
2812 2834
2813 2835 SLIST_FOREACH(u, &t4_uld_list, link) {
2814 2836 if (u == ui) {
2815 2837 if (ui->refcount > 0) {
2816 2838 rc = EBUSY;
2817 2839 goto done;
2818 2840 }
2819 2841
2820 2842 SLIST_REMOVE(&t4_uld_list, ui, uld_info, link);
2821 2843 rc = 0;
2822 2844 goto done;
2823 2845 }
2824 2846 }
2825 2847 done:
2826 2848 mutex_exit(&t4_uld_list_lock);
2827 2849 return (rc);
2828 2850 }
2829 2851
2830 2852 static int
2831 2853 activate_uld(struct adapter *sc, int id, struct uld_softc *usc)
2832 2854 {
2833 2855 int rc = EAGAIN;
2834 2856 struct uld_info *ui;
2835 2857
2836 2858 mutex_enter(&t4_uld_list_lock);
2837 2859
2838 2860 SLIST_FOREACH(ui, &t4_uld_list, link) {
2839 2861 if (ui->uld_id == id) {
2840 2862 rc = ui->attach(sc, &usc->softc);
2841 2863 if (rc == 0) {
2842 2864 ASSERT(usc->softc != NULL);
2843 2865 ui->refcount++;
2844 2866 usc->uld = ui;
2845 2867 }
2846 2868 goto done;
2847 2869 }
2848 2870 }
2849 2871 done:
2850 2872 mutex_exit(&t4_uld_list_lock);
2851 2873
2852 2874 return (rc);
2853 2875 }
2854 2876
2855 2877 static int
2856 2878 deactivate_uld(struct uld_softc *usc)
2857 2879 {
2858 2880 int rc;
2859 2881
2860 2882 mutex_enter(&t4_uld_list_lock);
2861 2883
2862 2884 if (usc->uld == NULL || usc->softc == NULL) {
2863 2885 rc = EINVAL;
2864 2886 goto done;
2865 2887 }
2866 2888
2867 2889 rc = usc->uld->detach(usc->softc);
2868 2890 if (rc == 0) {
2869 2891 ASSERT(usc->uld->refcount > 0);
2870 2892 usc->uld->refcount--;
2871 2893 usc->uld = NULL;
2872 2894 usc->softc = NULL;
2873 2895 }
2874 2896 done:
2875 2897 mutex_exit(&t4_uld_list_lock);
2876 2898
2877 2899 return (rc);
2878 2900 }
2879 2901
2880 2902 void
2881 2903 t4_iterate(void (*func)(int, void *), void *arg)
2882 2904 {
2883 2905 struct adapter *sc;
2884 2906
2885 2907 mutex_enter(&t4_adapter_list_lock);
2886 2908 SLIST_FOREACH(sc, &t4_adapter_list, link) {
2887 2909 /*
2888 2910 * func should not make any assumptions about what state sc is
2889 2911 * in - the only guarantee is that sc->sc_lock is a valid lock.
2890 2912 */
2891 2913 func(ddi_get_instance(sc->dip), arg);
2892 2914 }
2893 2915 mutex_exit(&t4_adapter_list_lock);
2894 2916 }
2895 2917
2896 2918 #endif