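/*
 * attach(9E) entry point (DDI_ATTACH): allocate per-instance soft state,
 * set up the synchronization primitives, map the device registers and
 * prepare the adapter for operation.
 */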
if (cmd != DDI_ATTACH)
        return (DDI_FAILURE);

/*
 * Allocate space for soft state.
 */
instance = ddi_get_instance(dip);
rc = ddi_soft_state_zalloc(t4_list, instance);
if (rc != DDI_SUCCESS) {
        cxgb_printf(dip, CE_WARN,
            "failed to allocate soft state: %d", rc);
        return (DDI_FAILURE);
}

sc = ddi_get_soft_state(t4_list, instance);
sc->dip = dip;
sc->dev = makedevice(ddi_driver_major(dip), instance);
mutex_init(&sc->lock, NULL, MUTEX_DRIVER, NULL);
cv_init(&sc->cv, NULL, CV_DRIVER, NULL);
mutex_init(&sc->sfl_lock, NULL, MUTEX_DRIVER, NULL);
TAILQ_INIT(&sc->sfl);

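/*
 * Publish this adapter on the global adapter list so other parts of
 * the driver can look it up.
 */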
mutex_enter(&t4_adapter_list_lock);
SLIST_INSERT_HEAD(&t4_adapter_list, sc, link);
mutex_exit(&t4_adapter_list_lock);

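/*
 * Determine which PCI physical function we are attached to; the PF
 * number is also used as the mailbox for firmware communication.
 */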
sc->pf = getpf(sc);
if (sc->pf > 8) {
        rc = EINVAL;
        cxgb_printf(dip, CE_WARN,
            "failed to determine PCI PF# of device");
        goto done;
}
sc->mbox = sc->pf;

/* Initialize the driver properties */
prp = &sc->props;
(void) init_driver_props(sc, prp);

/*
 * Enable access to the PCI config space.
 */
/* ... */

rc = ddi_regs_map_setup(dip, 1, &sc->regp, 0, 0, &da, &sc->regh);
if (rc != DDI_SUCCESS) {
        cxgb_printf(dip, CE_WARN,
            "failed to map device registers: %d", rc);
        goto done;
}

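/* No channel is mapped to a port yet. */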
(void) memset(sc->chan_map, 0xff, sizeof (sc->chan_map));

/*
 * Initialize the CPL and firmware message handler tables.  Every
 * entry starts out routed to the "not handled" stubs.
 */
for (i = 0; i < ARRAY_SIZE(sc->cpl_handler); i++) {
        sc->cpl_handler[i] = cpl_not_handled;
}

for (i = 0; i < ARRAY_SIZE(sc->fw_msg_handler); i++) {
        sc->fw_msg_handler[i] = fw_msg_not_handled;
}

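/* Create one "reclaim" taskq per channel for deferred cleanup work. */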
for (i = 0; i < NCHAN; i++) {
        (void) snprintf(name, sizeof (name), "%s-%d",
            "reclaim", i);
        sc->tq[i] = ddi_taskq_create(sc->dip,
            name, 1, TASKQ_DEFAULTPRI, 0);

        if (sc->tq[i] == NULL) {
                cxgb_printf(dip, CE_WARN,
                    "failed to create task queues");
                rc = DDI_FAILURE;
                goto done;
        }
}

/*
 * Prepare the adapter for operation.
 */
rc = -t4_prep_adapter(sc, false);
if (rc != 0) {
        cxgb_printf(dip, CE_WARN, "failed to prepare adapter: %d", rc);
        goto done;
}

/*
 * Enable BAR1 access.
 */
sc->doorbells |= DOORBELL_KDB;
rc = ddi_regs_map_setup(dip, 2, &sc->reg1p, 0, 0, &da1, &sc->reg1h);
if (rc != DDI_SUCCESS) {
        cxgb_printf(dip, CE_WARN,
            "failed to map BAR1 device registers: %d", rc);
        goto done;
} else {
        if (is_t5(sc->params.chip)) {
                /* ... */

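/*
 * Teardown (detach) path: undo what was set up during attach.
 */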
instance = ddi_get_instance(dip);
sc = ddi_get_soft_state(t4_list, instance);
if (sc == NULL)
        return (DDI_SUCCESS);

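/*
 * If the adapter was brought all the way up, quiesce interrupts and
 * uninitialize every port before tearing the adapter itself down.
 */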
if (sc->flags & FULL_INIT_DONE) {
        t4_intr_disable(sc);
        for_each_port(sc, i) {
                pi = sc->port[i];
                if (pi && pi->flags & PORT_INIT_DONE)
                        (void) port_full_uninit(pi);
        }
        (void) adapter_full_uninit(sc);
}

/* Safe to call no matter what */
ddi_prop_remove_all(dip);
ddi_remove_minor_node(dip, NULL);

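/* Wait for any outstanding work, then destroy the per-channel taskqs. */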
for (i = 0; i < NCHAN; i++) {
        if (sc->tq[i]) {
                ddi_taskq_wait(sc->tq[i]);
                ddi_taskq_destroy(sc->tq[i]);
        }
}

if (sc->ksp != NULL)
        kstat_delete(sc->ksp);
if (sc->ksp_stat != NULL)
        kstat_delete(sc->ksp_stat);

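/* Free the software state for the SGE queues. */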
s = &sc->sge;
if (s->rxq != NULL)
        kmem_free(s->rxq, s->nrxq * sizeof (struct sge_rxq));
#ifdef TCP_OFFLOAD_ENABLE
if (s->ofld_txq != NULL)
        kmem_free(s->ofld_txq, s->nofldtxq * sizeof (struct sge_wrq));
if (s->ofld_rxq != NULL)
        kmem_free(s->ofld_rxq,
            s->nofldrxq * sizeof (struct sge_ofld_rxq));
if (s->ctrlq != NULL)
        kmem_free(s->ctrlq,
            sc->params.nports * sizeof (struct sge_wrq));
#endif
if (s->txq != NULL)
        kmem_free(s->txq, s->ntxq * sizeof (struct sge_txq));
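/*
 * For context, a minimal sketch of how attach/detach entry points such as
 * the ones above are typically registered with the DDI framework.  This is
 * not taken from the driver itself: the identifiers t4_devo_attach,
 * t4_devo_detach, t4_cb_ops and the linkinfo string are illustrative
 * assumptions, _fini()/_info() are omitted for brevity, and a nexus driver
 * would normally also supply a bus_ops table (left NULL here).
 */
#include <sys/conf.h>
#include <sys/devops.h>
#include <sys/modctl.h>
#include <sys/ddi.h>
#include <sys/sunddi.h>

/* Entry points and character ops, assumed to be defined elsewhere. */
int t4_devo_attach(dev_info_t *, ddi_attach_cmd_t);
int t4_devo_detach(dev_info_t *, ddi_detach_cmd_t);
extern struct cb_ops t4_cb_ops;

static struct dev_ops t4_dev_ops = {
        DEVO_REV,                       /* devo_rev */
        0,                              /* devo_refcnt */
        ddi_no_info,                    /* devo_getinfo */
        nulldev,                        /* devo_identify */
        nulldev,                        /* devo_probe */
        t4_devo_attach,                 /* devo_attach */
        t4_devo_detach,                 /* devo_detach */
        nodev,                          /* devo_reset */
        &t4_cb_ops,                     /* devo_cb_ops */
        NULL,                           /* devo_bus_ops */
        NULL,                           /* devo_power */
        ddi_quiesce_not_supported       /* devo_quiesce */
};

static struct modldrv modldrv = {
        &mod_driverops,                 /* drv_modops */
        "Chelsio T4 nexus driver",      /* drv_linkinfo */
        &t4_dev_ops                     /* drv_dev_ops */
};

static struct modlinkage modlinkage = {
        MODREV_1, &modldrv, NULL
};

int
_init(void)
{
        /*
         * A real _init() would also initialize the soft-state anchor
         * (ddi_soft_state_init() on t4_list) before installing the module.
         */
        return (mod_install(&modlinkage));
}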