Index: sys/dev/nvme/nvme_ctrlr.c
===================================================================
--- sys/dev/nvme/nvme_ctrlr.c	(revision 337062)
+++ sys/dev/nvme/nvme_ctrlr.c	(working copy)
@@ -53,6 +53,8 @@
     struct nvme_async_event_request *aer);
 static void nvme_ctrlr_setup_interrupts(struct nvme_controller *ctrlr);
 
+static void nvme_ctrlr_observe(void *arg);
+
 static int
 nvme_ctrlr_allocate_bar(struct nvme_controller *ctrlr)
 {
@@ -265,14 +267,57 @@
 	return (0);
 }
 
+static void
+nvme_ctrlr_observe(void *arg)
+{
+	/* printf("observe enter\n"); */
+	struct nvme_controller *ctrlr = arg;
+	/* struct nvme_qpair *qpair; */
+	uint32_t csts;
+	uint8_t cfs;
+	/* int i; */
+
+	csts = nvme_mmio_read_4(ctrlr, csts);
+	cfs = (csts >> NVME_CSTS_REG_CFS_SHIFT) & NVME_CSTS_REG_CFS_MASK;
+
+	if (cfs != 0) {
+		printf("CONTROLLER HAS CRASHED!!! cfs = %d csts=%x\n",
+		    cfs, csts);
+		printf("controller addr %p\n", ctrlr);
+/*
+		printf("Queue dump:\n");
+
+		printf("Adminq:\n");
+		qpair = &ctrlr->adminq;
+		nvme_dump_queue(qpair);
+
+		printf("\n\nioqs:\n");
+		for (i = 0; i < ctrlr->num_io_queues; i++) {
+			qpair = &ctrlr->ioq[i];
+			nvme_dump_queue(qpair);
+		}
+*/
+		panic("uhoh");
+	}
+
+	/* Reschedule ourselves one second from now. */
+	callout_schedule(&ctrlr->observer, 1 * hz);
+}
+
 static int
 nvme_ctrlr_disable(struct nvme_controller *ctrlr)
 {
+	printf("CTRL DISABLE\n");
 	uint32_t cc;
 	uint32_t csts;
 	uint8_t  en, rdy;
 	int err;
 
+	printf("CALLOUT_STOP\n");
+	callout_stop(&ctrlr->observer);
+
 	cc = nvme_mmio_read_4(ctrlr, cc);
 	csts = nvme_mmio_read_4(ctrlr, csts);
@@ -315,6 +360,7 @@
 static int
 nvme_ctrlr_enable(struct nvme_controller *ctrlr)
 {
+	printf("CTRL ENABLE\n");
 	uint32_t cc;
 	uint32_t csts;
 	uint32_t aqa;
@@ -377,6 +423,7 @@
 int
 nvme_ctrlr_hw_reset(struct nvme_controller *ctrlr)
 {
+	printf("CTRL HW RESET\n");
 	int i, err;
 
 	nvme_admin_qpair_disable(&ctrlr->adminq);
@@ -401,6 +448,7 @@
 void
 nvme_ctrlr_reset(struct nvme_controller *ctrlr)
 {
+	printf("CTRL RESET\n");
 	int cmpset;
 
 	cmpset = atomic_cmpset_32(&ctrlr->is_resetting, 0, 1);
@@ -891,6 +939,11 @@
 
 	for (i = 0; i < ctrlr->num_io_queues; i++)
 		nvme_io_qpair_enable(&ctrlr->ioq[i]);
+
+	printf("CALLOUT STARTUP\n");
+	callout_stop(&ctrlr->observer);
+	/* Start observation 45 seconds after init. */
+	callout_reset(&ctrlr->observer, 45 * hz, nvme_ctrlr_observe, ctrlr);
 }
 
 void
@@ -955,7 +1008,7 @@
 }
 
 /*
- * Poll the single-vector intertrupt case: num_io_queues will be 1 and
+ * Poll the single-vector interrupt case: num_io_queues will be 1 and
  * there's only a single vector. While we're polling, we mask further
  * interrupts in the controller.
  */
@@ -1232,6 +1285,9 @@
 
 	mtx_init(&ctrlr->lock, "nvme ctrlr lock", NULL, MTX_DEF);
 
+	printf("CALLOUT_INIT!\n");
+	callout_init(&ctrlr->observer, 0);
+
 	status = nvme_ctrlr_allocate_bar(ctrlr);
 
 	if (status != 0)
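The observer added above is a standard self-rearming callout(9) watchdog. For reference, here is a minimal sketch of the same pattern packaged as a standalone loadable module. This is an untested illustration, not part of the patch: the obs_* names are hypothetical, and the patch itself hangs the callout off struct nvme_controller rather than a module-global.

/*
 * Minimal sketch (untested) of the self-rearming callout(9) watchdog
 * pattern used by nvme_ctrlr_observe() above, as a standalone KLD.
 * All obs_* names are hypothetical.
 */
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/errno.h>
#include <sys/kernel.h>
#include <sys/module.h>
#include <sys/callout.h>

static struct callout obs_callout;

static void
obs_tick(void *arg)
{
	/* The driver samples CSTS.CFS at this point; we just log. */
	printf("observer tick\n");

	/* Re-arm one second out, as the patch does. */
	callout_schedule(&obs_callout, 1 * hz);
}

static int
obs_modevent(module_t mod, int type, void *data)
{
	switch (type) {
	case MOD_LOAD:
		/* Second arg nonzero means MPSAFE; the patch passes 0. */
		callout_init(&obs_callout, 1);
		/* First tick 45 seconds out, as in nvme_ctrlr_start(). */
		callout_reset(&obs_callout, 45 * hz, obs_tick, NULL);
		return (0);
	case MOD_UNLOAD:
		/* callout_drain() also waits out a running handler. */
		callout_drain(&obs_callout);
		return (0);
	default:
		return (EOPNOTSUPP);
	}
}

static moduledata_t obs_mod = {
	"observer",
	obs_modevent,
	NULL
};

DECLARE_MODULE(observer, obs_mod, SI_SUB_DRIVERS, SI_ORDER_ANY);

One note on the patch itself: callout_reset(9) already cancels a pending timeout, so the callout_stop() immediately before the callout_reset() in nvme_ctrlr_start() is belt-and-suspenders rather than strictly required.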
Index: sys/dev/nvme/nvme_private.h
===================================================================
--- sys/dev/nvme/nvme_private.h	(revision 337062)
+++ sys/dev/nvme/nvme_private.h	(working copy)
@@ -279,6 +279,8 @@
 	struct task		fail_req_task;
 	struct taskqueue	*taskqueue;
 
+	struct callout		observer;
+
 	/* For shared legacy interrupt. */
 	int			rid;
 	struct resource		*res;
@@ -449,6 +451,7 @@
 void	nvme_ns_destruct(struct nvme_namespace *ns);
 
 void	nvme_sysctl_initialize_ctrlr(struct nvme_controller *ctrlr);
+void	nvme_dump_queue(struct nvme_qpair *qpair);
 
 void	nvme_dump_command(struct nvme_command *cmd);
 void	nvme_dump_completion(struct nvme_completion *cpl);
Index: sys/dev/nvme/nvme_qpair.c
===================================================================
--- sys/dev/nvme/nvme_qpair.c	(revision 337062)
+++ sys/dev/nvme/nvme_qpair.c	(working copy)
@@ -387,6 +387,8 @@
 			nvme_qpair_print_completion(qpair, cpl);
 	}
 
+	/* if (!atomic_cmpset_ptr(qpair->act_tr[cpl->cid], tr, NULL)) */
+	/*	panic("Mismatched act_tr\n"); */
 	qpair->act_tr[cpl->cid] = NULL;
 
 	KASSERT(cpl->cid == req->cmd.cid, ("cpl cid does not match cmd cid\n"));
@@ -488,6 +490,7 @@
 		return (false);
 
 	while (1) {
+		/* Copy completion. */
 		cpl = qpair->cpl[qpair->cq_head];
 
 		/* Convert to host endian */
@@ -496,10 +499,12 @@
 		if (NVME_STATUS_GET_P(cpl.status) != qpair->phase)
 			break;
 
+		tr = NULL;
 		tr = qpair->act_tr[cpl.cid];
 
 		if (tr != NULL) {
 			nvme_qpair_complete_tracker(qpair, tr, &cpl, TRUE);
+			/* For sysctl readout only. */
 			qpair->sq_head = cpl.sqhd;
 			done++;
 		} else {
@@ -556,9 +561,12 @@
 		qpair->res = bus_alloc_resource_any(ctrlr->dev, SYS_RES_IRQ,
 		    &qpair->rid, RF_ACTIVE);
 
-		bus_setup_intr(ctrlr->dev, qpair->res,
+		if (qpair->res == NULL)
+			panic("Unable to allocate IRQ!");
+		if (bus_setup_intr(ctrlr->dev, qpair->res,
 		    INTR_TYPE_MISC | INTR_MPSAFE, NULL,
-		    nvme_qpair_msix_handler, qpair, &qpair->tag);
+		    nvme_qpair_msix_handler, qpair, &qpair->tag))
+			panic("Unable to setup interrupt!");
 	}
 
 	mtx_init(&qpair->lock, "nvme qpair lock", NULL, MTX_DEF);
@@ -595,7 +603,7 @@
 	}
 
 	if (bus_dmamem_alloc(qpair->dma_tag, (void **)&queuemem,
-	    BUS_DMA_NOWAIT, &qpair->queuemem_map)) {
+	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT, &qpair->queuemem_map)) {
 		nvme_printf(ctrlr, "failed to alloc qpair memory\n");
 		goto out;
 	}
@@ -810,6 +818,8 @@
 	req = tr->req;
 	req->cmd.cid = tr->cid;
+	/* if (!atomic_cmpset_ptr(qpair->act_tr[tr->cid], NULL, tr)) */
+	/*	panic("Transaction fence error\n"); */
 	qpair->act_tr[tr->cid] = tr;
 	ctrlr = qpair->ctrlr;
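The commented-out atomic_cmpset_ptr() fences above sketch a claim/release protocol on the act_tr[] slots; note that, as written, they pass the slot's value where atomic_cmpset_ptr(9) expects its address (and uintptr_t casts would be needed). The idea itself is easy to demonstrate as a self-contained userland analogue with C11 atomics. A sketch with hypothetical names, for illustration only:

/*
 * Userland sketch of the act_tr claim/release fencing the commented-out
 * atomic_cmpset_ptr() lines aim for, using C11 atomics.
 */
#include <assert.h>
#include <stdatomic.h>
#include <stddef.h>
#include <stdio.h>

struct tracker {
	int cid;
};

#define NUM_TRACKERS 4

static _Atomic(struct tracker *) act_tr[NUM_TRACKERS];

/* Submit path: claim slot cid; fails if the slot is already occupied. */
static int
claim(struct tracker *tr)
{
	struct tracker *expected = NULL;

	return (atomic_compare_exchange_strong(&act_tr[tr->cid], &expected,
	    tr));
}

/* Completion path: release slot cid; fails unless it holds this tracker. */
static int
release(struct tracker *tr)
{
	struct tracker *expected = tr;

	return (atomic_compare_exchange_strong(&act_tr[tr->cid], &expected,
	    (struct tracker *)NULL));
}

int
main(void)
{
	struct tracker t = { .cid = 2 };

	assert(claim(&t));	/* empty slot: claim succeeds */
	assert(!claim(&t));	/* duplicate submit is detected */
	assert(release(&t));	/* completion clears the slot */
	assert(!release(&t));	/* duplicate completion is detected */
	printf("act_tr fencing ok\n");
	return (0);
}

Here atomic_compare_exchange_strong() plays the role of atomic_cmpset_ptr(9): the submit path may only install a tracker into an empty slot, and the completion path may only clear a slot that still holds that tracker, so a stale or duplicated completion ID trips the check instead of silently corrupting state.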
Index: sys/dev/nvme/nvme_sysctl.c
===================================================================
--- sys/dev/nvme/nvme_sysctl.c	(revision 337062)
+++ sys/dev/nvme/nvme_sysctl.c	(working copy)
@@ -29,6 +29,7 @@
 #include <sys/cdefs.h>
 __FBSDID("$FreeBSD$");
 
+#include "opt_ddb.h"
 #include "opt_nvme.h"
 
 #include <sys/param.h>
@@ -37,6 +38,10 @@
 
 #include "nvme_private.h"
 
+#ifdef DDB
+#include <ddb/ddb.h>
+#endif
+
 #ifndef NVME_USE_NVD
 #define NVME_USE_NVD 1
 #endif
@@ -57,7 +62,7 @@
 #define sysctl_handle_64 sysctl_handle_quad
 #endif
 
-static void
+void
 nvme_dump_queue(struct nvme_qpair *qpair)
 {
 	struct nvme_completion *cpl;
@@ -313,3 +318,60 @@
 		    que_tree);
 	}
 }
+
+#ifdef DDB
+static void
+db_print_nvme_controller(struct nvme_controller *ctrlr)
+{
+	struct nvme_qpair *qpair;
+	uint32_t csts;
+	uint32_t db;
+	int i;
+
+	if (ctrlr == NULL)
+		return;
+
+	db_printf("dev: %p", (void *)ctrlr->dev);
+
+	csts = nvme_mmio_read_4(ctrlr, csts);
+	db_printf(" csts: %x", csts);
+
+	db_printf("\n Admin queue:\n");
+	qpair = &ctrlr->adminq;
+	db = nvme_mmio_read_4(ctrlr, doorbell[qpair->id].sq_tdbl);
+	db_printf("qpair: %d sq_tdbl: %d", qpair->id, db);
+	db = nvme_mmio_read_4(ctrlr, doorbell[qpair->id].cq_hdbl);
+	db_printf(" cq_hdbl: %d", db);
+
+	nvme_dump_queue(qpair);
+
+	db_printf("\n ioqs:\n");
+	for (i = 0; i < ctrlr->num_io_queues; i++) {
+		qpair = &ctrlr->ioq[i];
+		db = nvme_mmio_read_4(ctrlr, doorbell[qpair->id].sq_tdbl);
+		db_printf("qpair: %d sq_tdbl: %d", qpair->id, db);
+		db = nvme_mmio_read_4(ctrlr, doorbell[qpair->id].cq_hdbl);
+		db_printf(" cq_hdbl: %d", db);
+		db_printf(" qp_sq_head: %d qp_sq_tail: %d qp_cq_head: %d",
+		    qpair->sq_head, qpair->sq_tail, qpair->cq_head);
+
+		nvme_dump_queue(qpair);
+	}
+	db_printf("\n");
+}
+
+DB_SHOW_COMMAND(nvme_controller, db_show_nvme_controller)
+{
+	struct nvme_controller *ctrlr;
+
+	if (!have_addr) {
+		db_printf("usage: show nvme_controller <address>\n");
+		return;
+	}
+
+	ctrlr = (struct nvme_controller *)addr;
+	db_print_nvme_controller(ctrlr);
+}
+#endif
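For readers new to DDB hooks: the command above follows the stock DB_SHOW_COMMAND shape. A minimal skeleton of that convention, with mystruct as a placeholder name (untested; requires a kernel built with options DDB):

/*
 * Skeleton of a DDB "show" verb following the same have_addr/addr
 * convention as db_show_nvme_controller above. mystruct is hypothetical.
 */
#include <sys/param.h>
#include <sys/kernel.h>
#include <ddb/ddb.h>

DB_SHOW_COMMAND(mystruct, db_show_mystruct)
{
	if (!have_addr) {
		db_printf("usage: show mystruct <address>\n");
		return;
	}
	db_printf("mystruct at %p\n", (void *)addr);
}

At the db> prompt the new verb is then invoked as show nvme_controller <address>, where the address of the struct nvme_controller can be taken, for example, from the "controller addr" line that nvme_ctrlr_observe() prints before panicking.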