11969 Attempting to attach an invalid nvme namespace will cause a panic
Reviewed by: Robert Mustacchi <rm+illumos@fingolfin.org>
Reviewed by: Jerry Jelinek <jerry.jelinek@joyent.com>
--- old/usr/src/uts/common/io/nvme/nvme.c
+++ new/usr/src/uts/common/io/nvme/nvme.c
1 1 /*
2 2 * This file and its contents are supplied under the terms of the
3 3 * Common Development and Distribution License ("CDDL"), version 1.0.
4 4 * You may only use this file in accordance with the terms of version
5 5 * 1.0 of the CDDL.
6 6 *
7 7 * A full copy of the text of the CDDL should have accompanied this
8 8 * source. A copy of the CDDL is also available via the Internet at
9 9 * http://www.illumos.org/license/CDDL.
10 10 */
11 11
12 12 /*
13 13 * Copyright 2018 Nexenta Systems, Inc.
14 14 * Copyright 2016 Tegile Systems, Inc. All rights reserved.
15 15 * Copyright (c) 2016 The MathWorks, Inc. All rights reserved.
16 16 * Copyright 2020 Joyent, Inc.
17 17 * Copyright 2019 Western Digital Corporation.
18 18 * Copyright 2020 Racktop Systems.
19 19 */
20 20
21 21 /*
22 22 * blkdev driver for NVMe compliant storage devices
23 23 *
24 24 * This driver was written to conform to version 1.2.1 of the NVMe
25 25 * specification. It may work with newer versions, but that is completely
26 26 * untested and disabled by default.
27 27 *
28 28 * The driver has only been tested on x86 systems and will not work on big-
29 29 * endian systems without changes to the code accessing registers and data
30 30 * structures used by the hardware.
31 31 *
32 32 *
33 33 * Interrupt Usage:
34 34 *
35 35 * The driver will use a single interrupt while configuring the device as the
36 36 * specification requires, but contrary to the specification it will try to use
37 37 * a single-message MSI(-X) or FIXED interrupt. Later in the attach process it
38 38 * will switch to multiple-message MSI(-X) if supported. The driver wants to
39 39  * have one interrupt vector per CPU, but it will work correctly if fewer are
40 40  * available. Interrupts can be shared by queues; the interrupt handler will
41 41 * iterate through the I/O queue array by steps of n_intr_cnt. Usually only
42 42 * the admin queue will share an interrupt with one I/O queue. The interrupt
43 43 * handler will retrieve completed commands from all queues sharing an interrupt
44 44 * vector and will post them to a taskq for completion processing.
45 45 *
46 46 *
47 47 * Command Processing:
48 48 *
49 49 * NVMe devices can have up to 65535 I/O queue pairs, with each queue holding up
50 50 * to 65536 I/O commands. The driver will configure one I/O queue pair per
51 51 * available interrupt vector, with the queue length usually much smaller than
52 52 * the maximum of 65536. If the hardware doesn't provide enough queues, fewer
53 53 * interrupt vectors will be used.
54 54 *
55 55 * Additionally the hardware provides a single special admin queue pair that can
56 56 * hold up to 4096 admin commands.
57 57 *
58 58 * From the hardware perspective both queues of a queue pair are independent,
59 59 * but they share some driver state: the command array (holding pointers to
60 60 * commands currently being processed by the hardware) and the active command
61 61 * counter. Access to a submission queue and the shared state is protected by
62 62  * nq_mutex; the completion queue is protected by ncq_mutex.
63 63 *
64 64 * When a command is submitted to a queue pair the active command counter is
65 65 * incremented and a pointer to the command is stored in the command array. The
66 66  * array index is used as the command identifier (CID) in the submission queue
67 67  * entry. Some commands may take a very long time to complete, and if the queue
68 68  * wraps around in that time a submission may find the next array slot still
69 69  * in use by a long-running command. In this case the array is sequentially
70 70 * searched for the next free slot. The length of the command array is the same
71 71 * as the configured queue length. Queue overrun is prevented by the semaphore,
72 72 * so a command submission may block if the queue is full.
73 73 *
74 74 *
75 75 * Polled I/O Support:
76 76 *
77 77  * For kernel core dump support the driver can do polled I/O. As interrupts are
78 78  * turned off while dumping, the driver will just submit a command in the regular
79 79 * way, and then repeatedly attempt a command retrieval until it gets the
80 80 * command back.
81 81 *
82 82 *
83 83 * Namespace Support:
84 84 *
85 85  * NVMe devices can have multiple namespaces, each being an independent data
86 86 * store. The driver supports multiple namespaces and creates a blkdev interface
87 87 * for each namespace found. Namespaces can have various attributes to support
88 88  * protection information. This driver does not support these attributes and
89 89  * ignores namespaces that have them.
90 90 *
91 91  * As of NVMe 1.1 namespaces can have a 64bit Extended Unique Identifier
92 92 * (EUI64). This driver uses the EUI64 if present to generate the devid and
93 93 * passes it to blkdev to use it in the device node names. As this is currently
94 94  * untested, namespaces with an EUI64 are ignored by default.
95 95 *
96 96 * We currently support only (2 << NVME_MINOR_INST_SHIFT) - 2 namespaces in a
97 97 * single controller. This is an artificial limit imposed by the driver to be
98 98 * able to address a reasonable number of controllers and namespaces using a
99 99 * 32bit minor node number.
100 100 *
101 101 *
102 102 * Minor nodes:
103 103 *
104 104 * For each NVMe device the driver exposes one minor node for the controller and
105 105 * one minor node for each namespace. The only operations supported by those
106 106 * minor nodes are open(9E), close(9E), and ioctl(9E). This serves as the
107 107 * interface for the nvmeadm(1M) utility.
108 108 *
109 109 *
110 110 * Blkdev Interface:
111 111 *
112 112 * This driver uses blkdev to do all the heavy lifting involved with presenting
113 113 * a disk device to the system. As a result, the processing of I/O requests is
114 114 * relatively simple as blkdev takes care of partitioning, boundary checks, DMA
115 115 * setup, and splitting of transfers into manageable chunks.
116 116 *
117 117 * I/O requests coming in from blkdev are turned into NVM commands and posted to
118 118 * an I/O queue. The queue is selected by taking the CPU id modulo the number of
119 119 * queues. There is currently no timeout handling of I/O commands.
120 120 *
121 121 * Blkdev also supports querying device/media information and generating a
122 122 * devid. The driver reports the best block size as determined by the namespace
123 123 * format back to blkdev as physical block size to support partition and block
124 124 * alignment. The devid is either based on the namespace EUI64, if present, or
125 125 * composed using the device vendor ID, model number, serial number, and the
126 126 * namespace ID.
127 127 *
128 128 *
129 129 * Error Handling:
130 130 *
131 131 * Error handling is currently limited to detecting fatal hardware errors,
132 132 * either by asynchronous events, or synchronously through command status or
133 133  * admin command timeouts. In case of severe errors the device is fenced off and
134 134  * all further requests will return EIO. FMA is then called to fault the device.
135 135 *
136 136 * The hardware has a limit for outstanding asynchronous event requests. Before
137 137  * this limit is known, the driver assumes it is at least 1 and posts a single
138 138  * asynchronous request. Later, when the limit is known, more asynchronous event
139 139  * requests are posted to allow quicker reception of error information. When an
140 140  * asynchronous event is posted by the hardware, the driver will parse the error
141 141 * status fields and log information or fault the device, depending on the
142 142 * severity of the asynchronous event. The asynchronous event request is then
143 143 * reused and posted to the admin queue again.
144 144 *
145 145 * On command completion the command status is checked for errors. In case of
146 146 * errors indicating a driver bug the driver panics. Almost all other error
147 147 * status values just cause EIO to be returned.
148 148 *
149 149 * Command timeouts are currently detected for all admin commands except
150 150 * asynchronous event requests. If a command times out and the hardware appears
151 151 * to be healthy the driver attempts to abort the command. The original command
152 152  * timeout is also applied to the abort command. If the abort times out too, the
153 153  * driver assumes the device is dead, fences it off, and calls FMA to retire
154 154 * it. In all other cases the aborted command should return immediately with a
155 155 * status indicating it was aborted, and the driver will wait indefinitely for
156 156 * that to happen. No timeout handling of normal I/O commands is presently done.
157 157 *
158 158 * Any command that times out due to the controller dropping dead will be put on
159 159  * the nvme_lost_cmds list if it references DMA memory. This prevents the DMA
160 160  * memory from being reused by the system and later being written to by a "dead"
161 161  * NVMe controller.
162 162 *
163 163 *
164 164 * Locking:
165 165 *
166 166 * Each queue pair has a nq_mutex and ncq_mutex. The nq_mutex must be held
167 167 * when accessing shared state and submission queue registers, ncq_mutex
168 168 * is held when accessing completion queue state and registers.
169 169 * Callers of nvme_unqueue_cmd() must make sure that nq_mutex is held, while
170 170 * nvme_submit_{admin,io}_cmd() and nvme_retrieve_cmd() take care of both
171 171 * mutexes themselves.
172 172 *
173 173 * Each command also has its own nc_mutex, which is associated with the
174 174 * condition variable nc_cv. It is only used on admin commands which are run
175 175 * synchronously. In that case it must be held across calls to
176 176 * nvme_submit_{admin,io}_cmd() and nvme_wait_cmd(), which is taken care of by
177 177 * nvme_admin_cmd(). It must also be held whenever the completion state of the
178 178  * command is changed or while an admin command timeout is handled.
179 179 *
180 180 * If both nc_mutex and nq_mutex must be held, nc_mutex must be acquired first.
181 181 * More than one nc_mutex may only be held when aborting commands. In this case,
182 182 * the nc_mutex of the command to be aborted must be held across the call to
183 183 * nvme_abort_cmd() to prevent the command from completing while the abort is in
184 184 * progress.
185 185 *
186 186 * If both nq_mutex and ncq_mutex need to be held, ncq_mutex must be
187 187 * acquired first. More than one nq_mutex is never held by a single thread.
188 188 * The ncq_mutex is only held by nvme_retrieve_cmd() and
189 189 * nvme_process_iocq(). nvme_process_iocq() is only called from the
190 190 * interrupt thread and nvme_retrieve_cmd() during polled I/O, so the
191 191 * mutex is non-contentious but is required for implementation completeness
192 192 * and safety.
193 193 *
194 194 * Each minor node has its own nm_mutex, which protects the open count nm_ocnt
195 195 * and exclusive-open flag nm_oexcl.
196 196 *
197 197 *
198 198 * Quiesce / Fast Reboot:
199 199 *
200 200 * The driver currently does not support fast reboot. A quiesce(9E) entry point
201 201 * is still provided which is used to send a shutdown notification to the
202 202 * device.
203 203 *
204 204 *
205 205 * DDI UFM Support
206 206 *
207 207 * The driver supports the DDI UFM framework for reporting information about
208 208 * the device's firmware image and slot configuration. This data can be
209 209 * queried by userland software via ioctls to the ufm driver. For more
210 210 * information, see ddi_ufm(9E).
211 211 *
212 212 *
213 213 * Driver Configuration:
214 214 *
215 215 * The following driver properties can be changed to control some aspects of the
216 216  * driver's operation (a sample nvme.conf fragment follows this comment block):
217 217 * - strict-version: can be set to 0 to allow devices conforming to newer
218 218 * major versions to be used
219 219  * - ignore-unknown-vendor-status: can be set to 1 to not treat any vendor-
220 220  *   specific command status as a fatal error leading to device faulting
221 221 * - admin-queue-len: the maximum length of the admin queue (16-4096)
222 222 * - io-squeue-len: the maximum length of the I/O submission queues (16-65536)
223 223 * - io-cqueue-len: the maximum length of the I/O completion queues (16-65536)
224 224 * - async-event-limit: the maximum number of asynchronous event requests to be
225 225 * posted by the driver
226 226 * - volatile-write-cache-enable: can be set to 0 to disable the volatile write
227 227 * cache
228 228 * - min-phys-block-size: the minimum physical block size to report to blkdev,
229 229 * which is among other things the basis for ZFS vdev ashift
230 230 * - max-submission-queues: the maximum number of I/O submission queues.
231 231 * - max-completion-queues: the maximum number of I/O completion queues,
232 232 * can be less than max-submission-queues, in which case the completion
233 233 * queues are shared.
234 234 *
235 235 *
236 236 * TODO:
237 237 * - figure out sane default for I/O queue depth reported to blkdev
238 238 * - FMA handling of media errors
239 239 * - support for devices supporting very large I/O requests using chained PRPs
240 240 * - support for configuring hardware parameters like interrupt coalescing
241 241 * - support for media formatting and hard partitioning into namespaces
242 242 * - support for big-endian systems
243 243 * - support for fast reboot
244 244 * - support for NVMe Subsystem Reset (1.1)
245 245 * - support for Scatter/Gather lists (1.1)
246 246 * - support for Reservations (1.1)
247 247 * - support for power management
248 248 */
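/*
 * For illustration, a hypothetical /kernel/drv/nvme.conf fragment using some
 * of the properties listed under "Driver Configuration" above could look like
 * this (the values shown are examples, not recommendations):
 *
 *	strict-version=1;
 *	admin-queue-len=256;
 *	io-squeue-len=1024;
 *	io-cqueue-len=2048;
 *	volatile-write-cache-enable=1;
 *	min-phys-block-size=4096;
 */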
249 249
250 250 #include <sys/byteorder.h>
251 251 #ifdef _BIG_ENDIAN
252 252 #error nvme driver needs porting for big-endian platforms
253 253 #endif
254 254
255 255 #include <sys/modctl.h>
256 256 #include <sys/conf.h>
257 257 #include <sys/devops.h>
258 258 #include <sys/ddi.h>
259 259 #include <sys/ddi_ufm.h>
260 260 #include <sys/sunddi.h>
261 261 #include <sys/sunndi.h>
262 262 #include <sys/bitmap.h>
263 263 #include <sys/sysmacros.h>
264 264 #include <sys/param.h>
265 265 #include <sys/varargs.h>
266 266 #include <sys/cpuvar.h>
267 267 #include <sys/disp.h>
268 268 #include <sys/blkdev.h>
269 269 #include <sys/atomic.h>
270 270 #include <sys/archsystm.h>
271 271 #include <sys/sata/sata_hba.h>
272 272 #include <sys/stat.h>
273 273 #include <sys/policy.h>
274 274 #include <sys/list.h>
275 275 #include <sys/dkio.h>
276 276
277 277 #include <sys/nvme.h>
278 278
279 279 #ifdef __x86
280 280 #include <sys/x86_archext.h>
281 281 #endif
282 282
283 283 #include "nvme_reg.h"
284 284 #include "nvme_var.h"
285 285
286 286 /*
287 287 * Assertions to make sure that we've properly captured various aspects of the
288 288 * packed structures and haven't broken them during updates.
289 289 */
290 290 CTASSERT(sizeof (nvme_identify_ctrl_t) == 0x1000);
291 291 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oacs) == 256);
292 292 CTASSERT(offsetof(nvme_identify_ctrl_t, id_sqes) == 512);
293 293 CTASSERT(offsetof(nvme_identify_ctrl_t, id_oncs) == 520);
294 294 CTASSERT(offsetof(nvme_identify_ctrl_t, id_subnqn) == 768);
295 295 CTASSERT(offsetof(nvme_identify_ctrl_t, id_nvmof) == 1792);
296 296 CTASSERT(offsetof(nvme_identify_ctrl_t, id_psd) == 2048);
297 297 CTASSERT(offsetof(nvme_identify_ctrl_t, id_vs) == 3072);
298 298
299 299 CTASSERT(sizeof (nvme_identify_nsid_t) == 0x1000);
300 300 CTASSERT(offsetof(nvme_identify_nsid_t, id_fpi) == 32);
301 301 CTASSERT(offsetof(nvme_identify_nsid_t, id_anagrpid) == 92);
302 302 CTASSERT(offsetof(nvme_identify_nsid_t, id_nguid) == 104);
303 303 CTASSERT(offsetof(nvme_identify_nsid_t, id_lbaf) == 128);
304 304 CTASSERT(offsetof(nvme_identify_nsid_t, id_vs) == 384);
305 305
306 306 CTASSERT(sizeof (nvme_identify_primary_caps_t) == 0x1000);
307 307 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vqfrt) == 32);
308 308 CTASSERT(offsetof(nvme_identify_primary_caps_t, nipc_vifrt) == 64);
309 309
310 310
311 311 /* NVMe spec version supported */
312 312 static const int nvme_version_major = 1;
313 313
314 314 /* tunable for admin command timeout in seconds, default is 1s */
315 315 int nvme_admin_cmd_timeout = 1;
316 316
317 317 /* tunable for FORMAT NVM command timeout in seconds, default is 600s */
318 318 int nvme_format_cmd_timeout = 600;
319 319
320 320 /* tunable for firmware commit with NVME_FWC_SAVE, default is 15s */
321 321 int nvme_commit_save_cmd_timeout = 15;
322 322
323 323 static int nvme_attach(dev_info_t *, ddi_attach_cmd_t);
324 324 static int nvme_detach(dev_info_t *, ddi_detach_cmd_t);
325 325 static int nvme_quiesce(dev_info_t *);
326 326 static int nvme_fm_errcb(dev_info_t *, ddi_fm_error_t *, const void *);
327 327 static int nvme_setup_interrupts(nvme_t *, int, int);
328 328 static void nvme_release_interrupts(nvme_t *);
329 329 static uint_t nvme_intr(caddr_t, caddr_t);
330 330
331 331 static void nvme_shutdown(nvme_t *, int, boolean_t);
332 332 static boolean_t nvme_reset(nvme_t *, boolean_t);
333 333 static int nvme_init(nvme_t *);
334 334 static nvme_cmd_t *nvme_alloc_cmd(nvme_t *, int);
335 335 static void nvme_free_cmd(nvme_cmd_t *);
336 336 static nvme_cmd_t *nvme_create_nvm_cmd(nvme_namespace_t *, uint8_t,
337 337 bd_xfer_t *);
338 338 static void nvme_admin_cmd(nvme_cmd_t *, int);
339 339 static void nvme_submit_admin_cmd(nvme_qpair_t *, nvme_cmd_t *);
340 340 static int nvme_submit_io_cmd(nvme_qpair_t *, nvme_cmd_t *);
341 341 static void nvme_submit_cmd_common(nvme_qpair_t *, nvme_cmd_t *);
342 342 static nvme_cmd_t *nvme_unqueue_cmd(nvme_t *, nvme_qpair_t *, int);
343 343 static nvme_cmd_t *nvme_retrieve_cmd(nvme_t *, nvme_qpair_t *);
344 344 static void nvme_wait_cmd(nvme_cmd_t *, uint_t);
345 345 static void nvme_wakeup_cmd(void *);
346 346 static void nvme_async_event_task(void *);
347 347
348 348 static int nvme_check_unknown_cmd_status(nvme_cmd_t *);
349 349 static int nvme_check_vendor_cmd_status(nvme_cmd_t *);
350 350 static int nvme_check_integrity_cmd_status(nvme_cmd_t *);
351 351 static int nvme_check_specific_cmd_status(nvme_cmd_t *);
352 352 static int nvme_check_generic_cmd_status(nvme_cmd_t *);
353 353 static inline int nvme_check_cmd_status(nvme_cmd_t *);
354 354
355 355 static int nvme_abort_cmd(nvme_cmd_t *, uint_t);
356 356 static void nvme_async_event(nvme_t *);
357 357 static int nvme_format_nvm(nvme_t *, boolean_t, uint32_t, uint8_t, boolean_t,
358 358 uint8_t, boolean_t, uint8_t);
359 359 static int nvme_get_logpage(nvme_t *, boolean_t, void **, size_t *, uint8_t,
360 360 ...);
361 361 static int nvme_identify(nvme_t *, boolean_t, uint32_t, void **);
362 362 static int nvme_set_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t,
363 363 uint32_t *);
364 364 static int nvme_get_features(nvme_t *, boolean_t, uint32_t, uint8_t, uint32_t *,
365 365 void **, size_t *);
366 366 static int nvme_write_cache_set(nvme_t *, boolean_t);
367 367 static int nvme_set_nqueues(nvme_t *);
368 368
369 369 static void nvme_free_dma(nvme_dma_t *);
370 370 static int nvme_zalloc_dma(nvme_t *, size_t, uint_t, ddi_dma_attr_t *,
371 371 nvme_dma_t **);
372 372 static int nvme_zalloc_queue_dma(nvme_t *, uint32_t, uint16_t, uint_t,
373 373 nvme_dma_t **);
374 374 static void nvme_free_qpair(nvme_qpair_t *);
375 375 static int nvme_alloc_qpair(nvme_t *, uint32_t, nvme_qpair_t **, uint_t);
376 376 static int nvme_create_io_qpair(nvme_t *, nvme_qpair_t *, uint16_t);
377 377
378 378 static inline void nvme_put64(nvme_t *, uintptr_t, uint64_t);
379 379 static inline void nvme_put32(nvme_t *, uintptr_t, uint32_t);
380 380 static inline uint64_t nvme_get64(nvme_t *, uintptr_t);
381 381 static inline uint32_t nvme_get32(nvme_t *, uintptr_t);
382 382
383 383 static boolean_t nvme_check_regs_hdl(nvme_t *);
384 384 static boolean_t nvme_check_dma_hdl(nvme_dma_t *);
385 385
386 386 static int nvme_fill_prp(nvme_cmd_t *, bd_xfer_t *);
387 387
388 388 static void nvme_bd_xfer_done(void *);
389 389 static void nvme_bd_driveinfo(void *, bd_drive_t *);
390 390 static int nvme_bd_mediainfo(void *, bd_media_t *);
391 391 static int nvme_bd_cmd(nvme_namespace_t *, bd_xfer_t *, uint8_t);
392 392 static int nvme_bd_read(void *, bd_xfer_t *);
393 393 static int nvme_bd_write(void *, bd_xfer_t *);
394 394 static int nvme_bd_sync(void *, bd_xfer_t *);
395 395 static int nvme_bd_devid(void *, dev_info_t *, ddi_devid_t *);
396 396 static int nvme_bd_free_space(void *, bd_xfer_t *);
397 397
398 398 static int nvme_prp_dma_constructor(void *, void *, int);
399 399 static void nvme_prp_dma_destructor(void *, void *);
400 400
401 401 static void nvme_prepare_devid(nvme_t *, uint32_t);
402 402
403 403 /* DDI UFM callbacks */
404 404 static int nvme_ufm_fill_image(ddi_ufm_handle_t *, void *, uint_t,
405 405 ddi_ufm_image_t *);
406 406 static int nvme_ufm_fill_slot(ddi_ufm_handle_t *, void *, uint_t, uint_t,
407 407 ddi_ufm_slot_t *);
408 408 static int nvme_ufm_getcaps(ddi_ufm_handle_t *, void *, ddi_ufm_cap_t *);
409 409
410 410 static int nvme_open(dev_t *, int, int, cred_t *);
411 411 static int nvme_close(dev_t, int, int, cred_t *);
412 412 static int nvme_ioctl(dev_t, int, intptr_t, int, cred_t *, int *);
413 413
414 414 static ddi_ufm_ops_t nvme_ufm_ops = {
415 415 NULL,
416 416 nvme_ufm_fill_image,
417 417 nvme_ufm_fill_slot,
418 418 nvme_ufm_getcaps
419 419 };
420 420
421 421 #define NVME_MINOR_INST_SHIFT 9
422 422 #define NVME_MINOR(inst, nsid) (((inst) << NVME_MINOR_INST_SHIFT) | (nsid))
423 423 #define NVME_MINOR_INST(minor) ((minor) >> NVME_MINOR_INST_SHIFT)
424 424 #define NVME_MINOR_NSID(minor) ((minor) & ((1 << NVME_MINOR_INST_SHIFT) - 1))
425 425 #define NVME_MINOR_MAX (NVME_MINOR(1, 0) - 2)
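/*
 * Worked example of the encoding above (for illustration only): with
 * NVME_MINOR_INST_SHIFT == 9, NVME_MINOR(2, 3) == (2 << 9) | 3 == 1027,
 * from which NVME_MINOR_INST(1027) == 2 and NVME_MINOR_NSID(1027) == 3.
 * An NSID of 0 denotes the controller's own minor node.
 */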
426 426
427 427 static void *nvme_state;
428 428 static kmem_cache_t *nvme_cmd_cache;
429 429
430 430 /*
431 431 * DMA attributes for queue DMA memory
432 432 *
433 433 * Queue DMA memory must be page aligned. The maximum length of a queue is
434 434 * 65536 entries, and an entry can be 64 bytes long.
435 435 */
436 436 static ddi_dma_attr_t nvme_queue_dma_attr = {
437 437 .dma_attr_version = DMA_ATTR_V0,
438 438 .dma_attr_addr_lo = 0,
439 439 .dma_attr_addr_hi = 0xffffffffffffffffULL,
440 440 .dma_attr_count_max = (UINT16_MAX + 1) * sizeof (nvme_sqe_t) - 1,
441 441 .dma_attr_align = 0x1000,
442 442 .dma_attr_burstsizes = 0x7ff,
443 443 .dma_attr_minxfer = 0x1000,
444 444 .dma_attr_maxxfer = (UINT16_MAX + 1) * sizeof (nvme_sqe_t),
445 445 .dma_attr_seg = 0xffffffffffffffffULL,
446 446 .dma_attr_sgllen = 1,
447 447 .dma_attr_granular = 1,
448 448 .dma_attr_flags = 0,
449 449 };
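/*
 * Worked example for the attributes above (illustration only): submission
 * queue entries are 64 bytes, so the largest possible queue occupies
 * (UINT16_MAX + 1) * 64 bytes == 4 MiB; dma_attr_count_max expresses that
 * as a maximum byte count minus one, and dma_attr_sgllen == 1 forces the
 * whole queue into a single physically contiguous cookie.
 */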
450 450
451 451 /*
452 452 * DMA attributes for transfers using Physical Region Page (PRP) entries
453 453 *
454 454 * A PRP entry describes one page of DMA memory using the page size specified
455 455 * in the controller configuration's memory page size register (CC.MPS). It uses
456 456 * a 64bit base address aligned to this page size. There is no limitation on
457 457 * chaining PRPs together for arbitrarily large DMA transfers.
458 458 */
459 459 static ddi_dma_attr_t nvme_prp_dma_attr = {
460 460 .dma_attr_version = DMA_ATTR_V0,
461 461 .dma_attr_addr_lo = 0,
462 462 .dma_attr_addr_hi = 0xffffffffffffffffULL,
463 463 .dma_attr_count_max = 0xfff,
464 464 .dma_attr_align = 0x1000,
465 465 .dma_attr_burstsizes = 0x7ff,
466 466 .dma_attr_minxfer = 0x1000,
467 467 .dma_attr_maxxfer = 0x1000,
468 468 .dma_attr_seg = 0xfff,
469 469 .dma_attr_sgllen = -1,
470 470 .dma_attr_granular = 1,
471 471 .dma_attr_flags = 0,
472 472 };
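/*
 * Example for the attributes above (illustration only): with CC.MPS
 * selecting 4 KiB pages, a 16 KiB transfer is described by four PRP entries
 * of 4 KiB each; dma_attr_seg == 0xfff keeps each cookie within a 4 KiB
 * boundary, as a PRP entry cannot describe memory crossing a page.
 */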
473 473
474 474 /*
475 475 * DMA attributes for transfers using scatter/gather lists
476 476 *
477 477 * A SGL entry describes a chunk of DMA memory using a 64bit base address and a
478 478 * 32bit length field. SGL Segment and SGL Last Segment entries require the
479 479 * length to be a multiple of 16 bytes.
480 480 */
481 481 static ddi_dma_attr_t nvme_sgl_dma_attr = {
482 482 .dma_attr_version = DMA_ATTR_V0,
483 483 .dma_attr_addr_lo = 0,
484 484 .dma_attr_addr_hi = 0xffffffffffffffffULL,
485 485 .dma_attr_count_max = 0xffffffffUL,
486 486 .dma_attr_align = 1,
487 487 .dma_attr_burstsizes = 0x7ff,
488 488 .dma_attr_minxfer = 0x10,
489 489 .dma_attr_maxxfer = 0xfffffffffULL,
490 490 .dma_attr_seg = 0xffffffffffffffffULL,
491 491 .dma_attr_sgllen = -1,
492 492 .dma_attr_granular = 0x10,
493 493 .dma_attr_flags = 0
494 494 };
495 495
496 496 static ddi_device_acc_attr_t nvme_reg_acc_attr = {
497 497 .devacc_attr_version = DDI_DEVICE_ATTR_V0,
498 498 .devacc_attr_endian_flags = DDI_STRUCTURE_LE_ACC,
499 499 .devacc_attr_dataorder = DDI_STRICTORDER_ACC
500 500 };
501 501
502 502 static struct cb_ops nvme_cb_ops = {
503 503 .cb_open = nvme_open,
504 504 .cb_close = nvme_close,
505 505 .cb_strategy = nodev,
506 506 .cb_print = nodev,
507 507 .cb_dump = nodev,
508 508 .cb_read = nodev,
509 509 .cb_write = nodev,
510 510 .cb_ioctl = nvme_ioctl,
511 511 .cb_devmap = nodev,
512 512 .cb_mmap = nodev,
513 513 .cb_segmap = nodev,
514 514 .cb_chpoll = nochpoll,
515 515 .cb_prop_op = ddi_prop_op,
516 516 .cb_str = 0,
517 517 .cb_flag = D_NEW | D_MP,
518 518 .cb_rev = CB_REV,
519 519 .cb_aread = nodev,
520 520 .cb_awrite = nodev
521 521 };
522 522
523 523 static struct dev_ops nvme_dev_ops = {
524 524 .devo_rev = DEVO_REV,
525 525 .devo_refcnt = 0,
526 526 .devo_getinfo = ddi_no_info,
527 527 .devo_identify = nulldev,
528 528 .devo_probe = nulldev,
529 529 .devo_attach = nvme_attach,
530 530 .devo_detach = nvme_detach,
531 531 .devo_reset = nodev,
532 532 .devo_cb_ops = &nvme_cb_ops,
533 533 .devo_bus_ops = NULL,
534 534 .devo_power = NULL,
535 535 .devo_quiesce = nvme_quiesce,
536 536 };
537 537
538 538 static struct modldrv nvme_modldrv = {
539 539 .drv_modops = &mod_driverops,
540 540 .drv_linkinfo = "NVMe v1.1b",
541 541 .drv_dev_ops = &nvme_dev_ops
542 542 };
543 543
544 544 static struct modlinkage nvme_modlinkage = {
545 545 .ml_rev = MODREV_1,
546 546 .ml_linkage = { &nvme_modldrv, NULL }
547 547 };
548 548
549 549 static bd_ops_t nvme_bd_ops = {
550 550 .o_version = BD_OPS_CURRENT_VERSION,
551 551 .o_drive_info = nvme_bd_driveinfo,
552 552 .o_media_info = nvme_bd_mediainfo,
553 553 .o_devid_init = nvme_bd_devid,
554 554 .o_sync_cache = nvme_bd_sync,
555 555 .o_read = nvme_bd_read,
556 556 .o_write = nvme_bd_write,
557 557 .o_free_space = nvme_bd_free_space,
558 558 };
559 559
560 560 /*
561 561 * This list will hold commands that have timed out and couldn't be aborted.
562 562 * As we don't know what the hardware may still do with the DMA memory we can't
563 563 * free them, so we'll keep them forever on this list where we can easily look
564 564 * at them with mdb.
565 565 */
566 566 static struct list nvme_lost_cmds;
567 567 static kmutex_t nvme_lc_mutex;
568 568
569 569 int
570 570 _init(void)
571 571 {
572 572 int error;
573 573
574 574 error = ddi_soft_state_init(&nvme_state, sizeof (nvme_t), 1);
575 575 if (error != DDI_SUCCESS)
576 576 return (error);
577 577
578 578 nvme_cmd_cache = kmem_cache_create("nvme_cmd_cache",
579 579 sizeof (nvme_cmd_t), 64, NULL, NULL, NULL, NULL, NULL, 0);
580 580
581 581 mutex_init(&nvme_lc_mutex, NULL, MUTEX_DRIVER, NULL);
582 582 list_create(&nvme_lost_cmds, sizeof (nvme_cmd_t),
583 583 offsetof(nvme_cmd_t, nc_list));
584 584
585 585 bd_mod_init(&nvme_dev_ops);
586 586
587 587 error = mod_install(&nvme_modlinkage);
588 588 if (error != DDI_SUCCESS) {
589 589 ddi_soft_state_fini(&nvme_state);
590 590 mutex_destroy(&nvme_lc_mutex);
591 591 list_destroy(&nvme_lost_cmds);
592 592 bd_mod_fini(&nvme_dev_ops);
593 593 }
594 594
595 595 return (error);
596 596 }
597 597
598 598 int
599 599 _fini(void)
600 600 {
601 601 int error;
602 602
603 603 if (!list_is_empty(&nvme_lost_cmds))
604 604 return (DDI_FAILURE);
605 605
606 606 error = mod_remove(&nvme_modlinkage);
607 607 if (error == DDI_SUCCESS) {
608 608 ddi_soft_state_fini(&nvme_state);
609 609 kmem_cache_destroy(nvme_cmd_cache);
610 610 mutex_destroy(&nvme_lc_mutex);
611 611 list_destroy(&nvme_lost_cmds);
612 612 bd_mod_fini(&nvme_dev_ops);
613 613 }
614 614
615 615 return (error);
616 616 }
617 617
618 618 int
619 619 _info(struct modinfo *modinfop)
620 620 {
621 621 return (mod_info(&nvme_modlinkage, modinfop));
622 622 }
623 623
624 624 static inline void
625 625 nvme_put64(nvme_t *nvme, uintptr_t reg, uint64_t val)
626 626 {
627 627 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
628 628
629 629 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
630 630 ddi_put64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg), val);
631 631 }
632 632
633 633 static inline void
634 634 nvme_put32(nvme_t *nvme, uintptr_t reg, uint32_t val)
635 635 {
636 636 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
637 637
638 638 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
639 639 ddi_put32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg), val);
640 640 }
641 641
642 642 static inline uint64_t
643 643 nvme_get64(nvme_t *nvme, uintptr_t reg)
644 644 {
645 645 uint64_t val;
646 646
647 647 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x7) == 0);
648 648
649 649 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
650 650 val = ddi_get64(nvme->n_regh, (uint64_t *)(nvme->n_regs + reg));
651 651
652 652 return (val);
653 653 }
654 654
655 655 static inline uint32_t
656 656 nvme_get32(nvme_t *nvme, uintptr_t reg)
657 657 {
658 658 uint32_t val;
659 659
660 660 ASSERT(((uintptr_t)(nvme->n_regs + reg) & 0x3) == 0);
661 661
662 662 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
663 663 val = ddi_get32(nvme->n_regh, (uint32_t *)(nvme->n_regs + reg));
664 664
665 665 return (val);
666 666 }
667 667
668 668 static boolean_t
669 669 nvme_check_regs_hdl(nvme_t *nvme)
670 670 {
671 671 ddi_fm_error_t error;
672 672
673 673 ddi_fm_acc_err_get(nvme->n_regh, &error, DDI_FME_VERSION);
674 674
675 675 if (error.fme_status != DDI_FM_OK)
676 676 return (B_TRUE);
677 677
678 678 return (B_FALSE);
679 679 }
680 680
681 681 static boolean_t
682 682 nvme_check_dma_hdl(nvme_dma_t *dma)
683 683 {
684 684 ddi_fm_error_t error;
685 685
686 686 if (dma == NULL)
687 687 return (B_FALSE);
688 688
689 689 ddi_fm_dma_err_get(dma->nd_dmah, &error, DDI_FME_VERSION);
690 690
691 691 if (error.fme_status != DDI_FM_OK)
692 692 return (B_TRUE);
693 693
694 694 return (B_FALSE);
695 695 }
696 696
697 697 static void
698 698 nvme_free_dma_common(nvme_dma_t *dma)
699 699 {
700 700 if (dma->nd_dmah != NULL)
701 701 (void) ddi_dma_unbind_handle(dma->nd_dmah);
702 702 if (dma->nd_acch != NULL)
703 703 ddi_dma_mem_free(&dma->nd_acch);
704 704 if (dma->nd_dmah != NULL)
705 705 ddi_dma_free_handle(&dma->nd_dmah);
706 706 }
707 707
708 708 static void
709 709 nvme_free_dma(nvme_dma_t *dma)
710 710 {
711 711 nvme_free_dma_common(dma);
712 712 kmem_free(dma, sizeof (*dma));
713 713 }
714 714
715 715 /* ARGSUSED */
716 716 static void
717 717 nvme_prp_dma_destructor(void *buf, void *private)
718 718 {
719 719 nvme_dma_t *dma = (nvme_dma_t *)buf;
720 720
721 721 nvme_free_dma_common(dma);
722 722 }
723 723
724 724 static int
725 725 nvme_alloc_dma_common(nvme_t *nvme, nvme_dma_t *dma,
726 726 size_t len, uint_t flags, ddi_dma_attr_t *dma_attr)
727 727 {
728 728 if (ddi_dma_alloc_handle(nvme->n_dip, dma_attr, DDI_DMA_SLEEP, NULL,
729 729 &dma->nd_dmah) != DDI_SUCCESS) {
730 730 /*
731 731 * Due to DDI_DMA_SLEEP this can't be DDI_DMA_NORESOURCES, and
732 732  * the only other possible error is DDI_DMA_BADATTR, which
733 733  * indicates a driver bug and should cause a panic.
734 734 */
735 735 dev_err(nvme->n_dip, CE_PANIC,
736 736 "!failed to get DMA handle, check DMA attributes");
737 737 return (DDI_FAILURE);
738 738 }
739 739
740 740 /*
741 741 * ddi_dma_mem_alloc() can only fail when DDI_DMA_NOSLEEP is specified
742 742 * or the flags are conflicting, which isn't the case here.
743 743 */
744 744 (void) ddi_dma_mem_alloc(dma->nd_dmah, len, &nvme->n_reg_acc_attr,
745 745 DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL, &dma->nd_memp,
746 746 &dma->nd_len, &dma->nd_acch);
747 747
748 748 if (ddi_dma_addr_bind_handle(dma->nd_dmah, NULL, dma->nd_memp,
749 749 dma->nd_len, flags | DDI_DMA_CONSISTENT, DDI_DMA_SLEEP, NULL,
750 750 &dma->nd_cookie, &dma->nd_ncookie) != DDI_DMA_MAPPED) {
751 751 dev_err(nvme->n_dip, CE_WARN,
752 752 "!failed to bind DMA memory");
753 753 atomic_inc_32(&nvme->n_dma_bind_err);
754 754 nvme_free_dma_common(dma);
755 755 return (DDI_FAILURE);
756 756 }
757 757
758 758 return (DDI_SUCCESS);
759 759 }
760 760
761 761 static int
762 762 nvme_zalloc_dma(nvme_t *nvme, size_t len, uint_t flags,
763 763 ddi_dma_attr_t *dma_attr, nvme_dma_t **ret)
764 764 {
765 765 nvme_dma_t *dma = kmem_zalloc(sizeof (nvme_dma_t), KM_SLEEP);
766 766
767 767 if (nvme_alloc_dma_common(nvme, dma, len, flags, dma_attr) !=
768 768 DDI_SUCCESS) {
769 769 *ret = NULL;
770 770 kmem_free(dma, sizeof (nvme_dma_t));
771 771 return (DDI_FAILURE);
772 772 }
773 773
774 774 bzero(dma->nd_memp, dma->nd_len);
775 775
776 776 *ret = dma;
777 777 return (DDI_SUCCESS);
778 778 }
779 779
780 780 /* ARGSUSED */
781 781 static int
782 782 nvme_prp_dma_constructor(void *buf, void *private, int flags)
783 783 {
784 784 nvme_dma_t *dma = (nvme_dma_t *)buf;
785 785 nvme_t *nvme = (nvme_t *)private;
786 786
787 787 dma->nd_dmah = NULL;
788 788 dma->nd_acch = NULL;
789 789
790 790 if (nvme_alloc_dma_common(nvme, dma, nvme->n_pagesize,
791 791 DDI_DMA_READ, &nvme->n_prp_dma_attr) != DDI_SUCCESS) {
792 792 return (-1);
793 793 }
794 794
795 795 ASSERT(dma->nd_ncookie == 1);
796 796
797 797 dma->nd_cached = B_TRUE;
798 798
799 799 return (0);
800 800 }
801 801
802 802 static int
803 803 nvme_zalloc_queue_dma(nvme_t *nvme, uint32_t nentry, uint16_t qe_len,
804 804 uint_t flags, nvme_dma_t **dma)
805 805 {
806 806 uint32_t len = nentry * qe_len;
807 807 ddi_dma_attr_t q_dma_attr = nvme->n_queue_dma_attr;
808 808
809 809 len = roundup(len, nvme->n_pagesize);
810 810
811 811 if (nvme_zalloc_dma(nvme, len, flags, &q_dma_attr, dma)
812 812 != DDI_SUCCESS) {
813 813 dev_err(nvme->n_dip, CE_WARN,
814 814 "!failed to get DMA memory for queue");
815 815 goto fail;
816 816 }
817 817
818 818 if ((*dma)->nd_ncookie != 1) {
819 819 dev_err(nvme->n_dip, CE_WARN,
820 820 "!got too many cookies for queue DMA");
821 821 goto fail;
822 822 }
823 823
824 824 return (DDI_SUCCESS);
825 825
826 826 fail:
827 827 if (*dma) {
828 828 nvme_free_dma(*dma);
829 829 *dma = NULL;
830 830 }
831 831
832 832 return (DDI_FAILURE);
833 833 }
834 834
835 835 static void
836 836 nvme_free_cq(nvme_cq_t *cq)
837 837 {
838 838 mutex_destroy(&cq->ncq_mutex);
839 839
840 840 if (cq->ncq_cmd_taskq != NULL)
841 841 taskq_destroy(cq->ncq_cmd_taskq);
842 842
843 843 if (cq->ncq_dma != NULL)
844 844 nvme_free_dma(cq->ncq_dma);
845 845
846 846 kmem_free(cq, sizeof (*cq));
847 847 }
848 848
849 849 static void
850 850 nvme_free_qpair(nvme_qpair_t *qp)
851 851 {
852 852 int i;
853 853
854 854 mutex_destroy(&qp->nq_mutex);
855 855 sema_destroy(&qp->nq_sema);
856 856
857 857 if (qp->nq_sqdma != NULL)
858 858 nvme_free_dma(qp->nq_sqdma);
859 859
860 860 if (qp->nq_active_cmds > 0)
861 861 for (i = 0; i != qp->nq_nentry; i++)
862 862 if (qp->nq_cmd[i] != NULL)
863 863 nvme_free_cmd(qp->nq_cmd[i]);
864 864
865 865 if (qp->nq_cmd != NULL)
866 866 kmem_free(qp->nq_cmd, sizeof (nvme_cmd_t *) * qp->nq_nentry);
867 867
868 868 kmem_free(qp, sizeof (nvme_qpair_t));
869 869 }
870 870
871 871 /*
872 872 * Destroy the pre-allocated cq array, but only free individual completion
873 873 * queues from the given starting index.
874 874 */
875 875 static void
876 876 nvme_destroy_cq_array(nvme_t *nvme, uint_t start)
877 877 {
878 878 uint_t i;
879 879
880 880 for (i = start; i < nvme->n_cq_count; i++)
881 881 if (nvme->n_cq[i] != NULL)
882 882 nvme_free_cq(nvme->n_cq[i]);
883 883
884 884 kmem_free(nvme->n_cq, sizeof (*nvme->n_cq) * nvme->n_cq_count);
885 885 }
886 886
887 887 static int
888 888 nvme_alloc_cq(nvme_t *nvme, uint32_t nentry, nvme_cq_t **cqp, uint16_t idx,
889 889 uint_t nthr)
890 890 {
891 891 nvme_cq_t *cq = kmem_zalloc(sizeof (*cq), KM_SLEEP);
892 892 char name[64]; /* large enough for the taskq name */
893 893
894 894 mutex_init(&cq->ncq_mutex, NULL, MUTEX_DRIVER,
895 895 DDI_INTR_PRI(nvme->n_intr_pri));
896 896
897 897 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_cqe_t),
898 898 DDI_DMA_READ, &cq->ncq_dma) != DDI_SUCCESS)
899 899 goto fail;
900 900
901 901 cq->ncq_cq = (nvme_cqe_t *)cq->ncq_dma->nd_memp;
902 902 cq->ncq_nentry = nentry;
903 903 cq->ncq_id = idx;
904 904 cq->ncq_hdbl = NVME_REG_CQHDBL(nvme, idx);
905 905
906 906 /*
907 907 * Each completion queue has its own command taskq.
908 908 */
909 909 (void) snprintf(name, sizeof (name), "%s%d_cmd_taskq%u",
910 910 ddi_driver_name(nvme->n_dip), ddi_get_instance(nvme->n_dip), idx);
911 911
912 912 cq->ncq_cmd_taskq = taskq_create(name, nthr, minclsyspri, 64, INT_MAX,
913 913 TASKQ_PREPOPULATE);
914 914
915 915 if (cq->ncq_cmd_taskq == NULL) {
916 916 dev_err(nvme->n_dip, CE_WARN, "!failed to create cmd "
917 917 "taskq for cq %u", idx);
918 918 goto fail;
919 919 }
920 920
921 921 *cqp = cq;
922 922 return (DDI_SUCCESS);
923 923
924 924 fail:
925 925 nvme_free_cq(cq);
926 926 *cqp = NULL;
927 927
928 928 return (DDI_FAILURE);
929 929 }
930 930
931 931 /*
932 932 * Create the n_cq array big enough to hold "ncq" completion queues.
933 933  * If the array already exists, it will be resized (but only made larger).
934 934 * The admin queue is included in this array, which boosts the
935 935 * max number of entries to UINT16_MAX + 1.
936 936 */
937 937 static int
938 938 nvme_create_cq_array(nvme_t *nvme, uint_t ncq, uint32_t nentry, uint_t nthr)
939 939 {
940 940 nvme_cq_t **cq;
941 941 uint_t i, cq_count;
942 942
943 943 ASSERT3U(ncq, >, nvme->n_cq_count);
944 944
945 945 cq = nvme->n_cq;
946 946 cq_count = nvme->n_cq_count;
947 947
948 948 nvme->n_cq = kmem_zalloc(sizeof (*nvme->n_cq) * ncq, KM_SLEEP);
949 949 nvme->n_cq_count = ncq;
950 950
951 951 for (i = 0; i < cq_count; i++)
952 952 nvme->n_cq[i] = cq[i];
953 953
954 954 for (; i < nvme->n_cq_count; i++)
955 955 if (nvme_alloc_cq(nvme, nentry, &nvme->n_cq[i], i, nthr) !=
956 956 DDI_SUCCESS)
957 957 goto fail;
958 958
959 959 if (cq != NULL)
960 960 kmem_free(cq, sizeof (*cq) * cq_count);
961 961
962 962 return (DDI_SUCCESS);
963 963
964 964 fail:
965 965 nvme_destroy_cq_array(nvme, cq_count);
966 966 /*
967 967 * Restore the original array
968 968 */
969 969 nvme->n_cq_count = cq_count;
970 970 nvme->n_cq = cq;
971 971
972 972 return (DDI_FAILURE);
973 973 }
974 974
975 975 static int
976 976 nvme_alloc_qpair(nvme_t *nvme, uint32_t nentry, nvme_qpair_t **nqp,
977 977 uint_t idx)
978 978 {
979 979 nvme_qpair_t *qp = kmem_zalloc(sizeof (*qp), KM_SLEEP);
980 980 uint_t cq_idx;
981 981
982 982 mutex_init(&qp->nq_mutex, NULL, MUTEX_DRIVER,
983 983 DDI_INTR_PRI(nvme->n_intr_pri));
984 984
985 985 /*
986 986 * The NVMe spec defines that a full queue has one empty (unused) slot;
987 987 * initialize the semaphore accordingly.
988 988 */
989 989 sema_init(&qp->nq_sema, nentry - 1, NULL, SEMA_DRIVER, NULL);
990 990
991 991 if (nvme_zalloc_queue_dma(nvme, nentry, sizeof (nvme_sqe_t),
992 992 DDI_DMA_WRITE, &qp->nq_sqdma) != DDI_SUCCESS)
993 993 goto fail;
994 994
995 995 /*
996 996 * idx == 0 is adminq, those above 0 are shared io completion queues.
997 997 */
998 998 cq_idx = idx == 0 ? 0 : 1 + (idx - 1) % (nvme->n_cq_count - 1);
999 999 qp->nq_cq = nvme->n_cq[cq_idx];
1000 1000 qp->nq_sq = (nvme_sqe_t *)qp->nq_sqdma->nd_memp;
1001 1001 qp->nq_nentry = nentry;
1002 1002
1003 1003 qp->nq_sqtdbl = NVME_REG_SQTDBL(nvme, idx);
1004 1004
1005 1005 qp->nq_cmd = kmem_zalloc(sizeof (nvme_cmd_t *) * nentry, KM_SLEEP);
1006 1006 qp->nq_next_cmd = 0;
1007 1007
1008 1008 *nqp = qp;
1009 1009 return (DDI_SUCCESS);
1010 1010
1011 1011 fail:
1012 1012 nvme_free_qpair(qp);
1013 1013 *nqp = NULL;
1014 1014
1015 1015 return (DDI_FAILURE);
1016 1016 }
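/*
 * Worked example of the cq sharing above (illustration only): with
 * n_cq_count == 5 (the admin cq plus four I/O cqs), I/O qpairs 1-4 map to
 * cqs 1-4, qpair 5 wraps back to cq 1, qpair 6 to cq 2, and so on.
 */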
1017 1017
1018 1018 static nvme_cmd_t *
1019 1019 nvme_alloc_cmd(nvme_t *nvme, int kmflag)
1020 1020 {
1021 1021 nvme_cmd_t *cmd = kmem_cache_alloc(nvme_cmd_cache, kmflag);
1022 1022
1023 1023 if (cmd == NULL)
1024 1024 return (cmd);
1025 1025
1026 1026 bzero(cmd, sizeof (nvme_cmd_t));
1027 1027
1028 1028 cmd->nc_nvme = nvme;
1029 1029
1030 1030 mutex_init(&cmd->nc_mutex, NULL, MUTEX_DRIVER,
1031 1031 DDI_INTR_PRI(nvme->n_intr_pri));
1032 1032 cv_init(&cmd->nc_cv, NULL, CV_DRIVER, NULL);
1033 1033
1034 1034 return (cmd);
1035 1035 }
1036 1036
1037 1037 static void
1038 1038 nvme_free_cmd(nvme_cmd_t *cmd)
1039 1039 {
1040 1040 /* Don't free commands on the lost commands list. */
1041 1041 if (list_link_active(&cmd->nc_list))
1042 1042 return;
1043 1043
1044 1044 if (cmd->nc_dma) {
1045 1045 if (cmd->nc_dma->nd_cached)
1046 1046 kmem_cache_free(cmd->nc_nvme->n_prp_cache,
1047 1047 cmd->nc_dma);
1048 1048 else
1049 1049 nvme_free_dma(cmd->nc_dma);
1050 1050 cmd->nc_dma = NULL;
1051 1051 }
1052 1052
1053 1053 cv_destroy(&cmd->nc_cv);
1054 1054 mutex_destroy(&cmd->nc_mutex);
1055 1055
1056 1056 kmem_cache_free(nvme_cmd_cache, cmd);
1057 1057 }
1058 1058
1059 1059 static void
1060 1060 nvme_submit_admin_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1061 1061 {
1062 1062 sema_p(&qp->nq_sema);
1063 1063 nvme_submit_cmd_common(qp, cmd);
1064 1064 }
1065 1065
1066 1066 static int
1067 1067 nvme_submit_io_cmd(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1068 1068 {
1069 1069 if (sema_tryp(&qp->nq_sema) == 0)
1070 1070 return (EAGAIN);
1071 1071
1072 1072 nvme_submit_cmd_common(qp, cmd);
1073 1073 return (0);
1074 1074 }
1075 1075
1076 1076 static void
1077 1077 nvme_submit_cmd_common(nvme_qpair_t *qp, nvme_cmd_t *cmd)
1078 1078 {
1079 1079 nvme_reg_sqtdbl_t tail = { 0 };
1080 1080
1081 1081 mutex_enter(&qp->nq_mutex);
1082 1082 cmd->nc_completed = B_FALSE;
1083 1083
1084 1084 /*
1085 1085 * Try to insert the cmd into the active cmd array at the nq_next_cmd
1086 1086  * slot. If the slot is already occupied, advance to the next slot and
1087 1087  * try again. This can happen for long-running commands like async event
1088 1088 * requests.
1089 1089 */
1090 1090 while (qp->nq_cmd[qp->nq_next_cmd] != NULL)
1091 1091 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1092 1092 qp->nq_cmd[qp->nq_next_cmd] = cmd;
1093 1093
1094 1094 qp->nq_active_cmds++;
1095 1095
1096 1096 cmd->nc_sqe.sqe_cid = qp->nq_next_cmd;
1097 1097 bcopy(&cmd->nc_sqe, &qp->nq_sq[qp->nq_sqtail], sizeof (nvme_sqe_t));
1098 1098 (void) ddi_dma_sync(qp->nq_sqdma->nd_dmah,
1099 1099 sizeof (nvme_sqe_t) * qp->nq_sqtail,
1100 1100 sizeof (nvme_sqe_t), DDI_DMA_SYNC_FORDEV);
1101 1101 qp->nq_next_cmd = (qp->nq_next_cmd + 1) % qp->nq_nentry;
1102 1102
1103 1103 tail.b.sqtdbl_sqt = qp->nq_sqtail = (qp->nq_sqtail + 1) % qp->nq_nentry;
1104 1104 nvme_put32(cmd->nc_nvme, qp->nq_sqtdbl, tail.r);
1105 1105
1106 1106 mutex_exit(&qp->nq_mutex);
1107 1107 }
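/*
 * For illustration, a caller submitting an I/O command deals with a full
 * queue roughly like this (a simplified sketch, not the actual
 * nvme_bd_cmd() code):
 *
 *	if (nvme_submit_io_cmd(ioq, cmd) == EAGAIN) {
 *		nvme_free_cmd(cmd);
 *		return (EAGAIN);
 *	}
 *
 * Returning EAGAIN lets blkdev reschedule and resubmit the transfer later.
 */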
1108 1108
1109 1109 static nvme_cmd_t *
1110 1110 nvme_unqueue_cmd(nvme_t *nvme, nvme_qpair_t *qp, int cid)
1111 1111 {
1112 1112 nvme_cmd_t *cmd;
1113 1113
1114 1114 ASSERT(mutex_owned(&qp->nq_mutex));
1115 1115 ASSERT3S(cid, <, qp->nq_nentry);
1116 1116
1117 1117 cmd = qp->nq_cmd[cid];
1118 1118 qp->nq_cmd[cid] = NULL;
1119 1119 ASSERT3U(qp->nq_active_cmds, >, 0);
1120 1120 qp->nq_active_cmds--;
1121 1121 sema_v(&qp->nq_sema);
1122 1122
1123 1123 ASSERT3P(cmd, !=, NULL);
1124 1124 ASSERT3P(cmd->nc_nvme, ==, nvme);
1125 1125 ASSERT3S(cmd->nc_sqe.sqe_cid, ==, cid);
1126 1126
1127 1127 return (cmd);
1128 1128 }
1129 1129
1130 1130 /*
1131 1131 * Get the command tied to the next completed cqe and bump along completion
1132 1132 * queue head counter.
1133 1133 */
1134 1134 static nvme_cmd_t *
1135 1135 nvme_get_completed(nvme_t *nvme, nvme_cq_t *cq)
1136 1136 {
1137 1137 nvme_qpair_t *qp;
1138 1138 nvme_cqe_t *cqe;
1139 1139 nvme_cmd_t *cmd;
1140 1140
1141 1141 ASSERT(mutex_owned(&cq->ncq_mutex));
1142 1142
1143 1143 cqe = &cq->ncq_cq[cq->ncq_head];
1144 1144
1145 1145 /* Check phase tag of CQE. Hardware inverts it for new entries. */
1146 1146 if (cqe->cqe_sf.sf_p == cq->ncq_phase)
1147 1147 return (NULL);
1148 1148
1149 1149 qp = nvme->n_ioq[cqe->cqe_sqid];
1150 1150
1151 1151 mutex_enter(&qp->nq_mutex);
1152 1152 cmd = nvme_unqueue_cmd(nvme, qp, cqe->cqe_cid);
1153 1153 mutex_exit(&qp->nq_mutex);
1154 1154
1155 1155 ASSERT(cmd->nc_sqid == cqe->cqe_sqid);
1156 1156 bcopy(cqe, &cmd->nc_cqe, sizeof (nvme_cqe_t));
1157 1157
1158 1158 qp->nq_sqhead = cqe->cqe_sqhd;
1159 1159
1160 1160 cq->ncq_head = (cq->ncq_head + 1) % cq->ncq_nentry;
1161 1161
1162 1162 /* Toggle phase on wrap-around. */
1163 1163 if (cq->ncq_head == 0)
1164 1164 cq->ncq_phase = cq->ncq_phase ? 0 : 1;
1165 1165
1166 1166 return (cmd);
1167 1167 }
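/*
 * Phase tag example for the check above: ncq_phase starts at 0 and the
 * queue memory is zeroed, while the hardware writes its first pass of
 * entries with the phase bit set to 1. A CQE whose phase bit still equals
 * ncq_phase has therefore not been completed yet. On each wrap-around the
 * hardware inverts the phase bit it writes, and the driver toggles
 * ncq_phase to match.
 */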
1168 1168
1169 1169 /*
1170 1170 * Process all completed commands on the io completion queue.
1171 1171 */
1172 1172 static uint_t
1173 1173 nvme_process_iocq(nvme_t *nvme, nvme_cq_t *cq)
1174 1174 {
1175 1175 nvme_reg_cqhdbl_t head = { 0 };
1176 1176 nvme_cmd_t *cmd;
1177 1177 uint_t completed = 0;
1178 1178
1179 1179 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1180 1180 DDI_SUCCESS)
1181 1181 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1182 1182 __func__);
1183 1183
1184 1184 mutex_enter(&cq->ncq_mutex);
1185 1185
1186 1186 while ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1187 1187 taskq_dispatch_ent(cq->ncq_cmd_taskq, cmd->nc_callback, cmd,
1188 1188 TQ_NOSLEEP, &cmd->nc_tqent);
1189 1189
1190 1190 completed++;
1191 1191 }
1192 1192
1193 1193 if (completed > 0) {
1194 1194 /*
1195 1195 * Update the completion queue head doorbell.
1196 1196 */
1197 1197 head.b.cqhdbl_cqh = cq->ncq_head;
1198 1198 nvme_put32(nvme, cq->ncq_hdbl, head.r);
1199 1199 }
1200 1200
1201 1201 mutex_exit(&cq->ncq_mutex);
1202 1202
1203 1203 return (completed);
1204 1204 }
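/*
 * For illustration, the interrupt handler described in the theory statement
 * walks the queues sharing its vector in steps of n_intr_cnt, roughly like
 * this (a simplified sketch; nvme_intr() is defined later in this file):
 *
 *	for (qnum = inum; qnum < nvme->n_cq_count; qnum += nvme->n_intr_cnt)
 *		ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
 */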
1205 1205
1206 1206 static nvme_cmd_t *
1207 1207 nvme_retrieve_cmd(nvme_t *nvme, nvme_qpair_t *qp)
1208 1208 {
1209 1209 nvme_cq_t *cq = qp->nq_cq;
1210 1210 nvme_reg_cqhdbl_t head = { 0 };
1211 1211 nvme_cmd_t *cmd;
1212 1212
1213 1213 if (ddi_dma_sync(cq->ncq_dma->nd_dmah, 0, 0, DDI_DMA_SYNC_FORKERNEL) !=
1214 1214 DDI_SUCCESS)
1215 1215 dev_err(nvme->n_dip, CE_WARN, "!ddi_dma_sync() failed in %s",
1216 1216 __func__);
1217 1217
1218 1218 mutex_enter(&cq->ncq_mutex);
1219 1219
1220 1220 if ((cmd = nvme_get_completed(nvme, cq)) != NULL) {
1221 1221 head.b.cqhdbl_cqh = cq->ncq_head;
1222 1222 nvme_put32(nvme, cq->ncq_hdbl, head.r);
1223 1223 }
1224 1224
1225 1225 mutex_exit(&cq->ncq_mutex);
1226 1226
1227 1227 return (cmd);
1228 1228 }
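/*
 * For illustration, polled I/O during a crash dump (see "Polled I/O
 * Support" in the theory statement) reduces to a loop of this shape
 * (a simplified sketch, not the actual nvme_bd_cmd() code):
 *
 *	while (qp->nq_active_cmds != 0) {
 *		if ((pcmd = nvme_retrieve_cmd(nvme, qp)) != NULL)
 *			pcmd->nc_callback(pcmd);
 *		else
 *			drv_usecwait(10);
 *	}
 */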
1229 1229
1230 1230 static int
1231 1231 nvme_check_unknown_cmd_status(nvme_cmd_t *cmd)
1232 1232 {
1233 1233 nvme_cqe_t *cqe = &cmd->nc_cqe;
1234 1234
1235 1235 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1236 1236 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
1237 1237 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
1238 1238 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
1239 1239 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
1240 1240
1241 1241 if (cmd->nc_xfer != NULL)
1242 1242 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1243 1243
1244 1244 if (cmd->nc_nvme->n_strict_version) {
1245 1245 cmd->nc_nvme->n_dead = B_TRUE;
1246 1246 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
1247 1247 }
1248 1248
1249 1249 return (EIO);
1250 1250 }
1251 1251
1252 1252 static int
1253 1253 nvme_check_vendor_cmd_status(nvme_cmd_t *cmd)
1254 1254 {
1255 1255 nvme_cqe_t *cqe = &cmd->nc_cqe;
1256 1256
1257 1257 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1258 1258 "!unknown command status received: opc = %x, sqid = %d, cid = %d, "
1259 1259 "sc = %x, sct = %x, dnr = %d, m = %d", cmd->nc_sqe.sqe_opc,
1260 1260 cqe->cqe_sqid, cqe->cqe_cid, cqe->cqe_sf.sf_sc, cqe->cqe_sf.sf_sct,
1261 1261 cqe->cqe_sf.sf_dnr, cqe->cqe_sf.sf_m);
1262 1262 if (!cmd->nc_nvme->n_ignore_unknown_vendor_status) {
1263 1263 cmd->nc_nvme->n_dead = B_TRUE;
1264 1264 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
1265 1265 }
1266 1266
1267 1267 return (EIO);
1268 1268 }
1269 1269
1270 1270 static int
1271 1271 nvme_check_integrity_cmd_status(nvme_cmd_t *cmd)
1272 1272 {
1273 1273 nvme_cqe_t *cqe = &cmd->nc_cqe;
1274 1274
1275 1275 switch (cqe->cqe_sf.sf_sc) {
1276 1276 case NVME_CQE_SC_INT_NVM_WRITE:
1277 1277 /* write fail */
1278 1278 /* TODO: post ereport */
1279 1279 if (cmd->nc_xfer != NULL)
1280 1280 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1281 1281 return (EIO);
1282 1282
1283 1283 case NVME_CQE_SC_INT_NVM_READ:
1284 1284 /* read fail */
1285 1285 /* TODO: post ereport */
1286 1286 if (cmd->nc_xfer != NULL)
1287 1287 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1288 1288 return (EIO);
1289 1289
1290 1290 default:
1291 1291 return (nvme_check_unknown_cmd_status(cmd));
1292 1292 }
1293 1293 }
1294 1294
1295 1295 static int
1296 1296 nvme_check_generic_cmd_status(nvme_cmd_t *cmd)
1297 1297 {
1298 1298 nvme_cqe_t *cqe = &cmd->nc_cqe;
1299 1299
1300 1300 switch (cqe->cqe_sf.sf_sc) {
1301 1301 case NVME_CQE_SC_GEN_SUCCESS:
1302 1302 return (0);
1303 1303
1304 1304 /*
1305 1305 * Errors indicating a bug in the driver should cause a panic.
1306 1306 */
1307 1307 case NVME_CQE_SC_GEN_INV_OPC:
1308 1308 /* Invalid Command Opcode */
1309 1309 if (!cmd->nc_dontpanic)
1310 1310 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1311 1311 "programming error: invalid opcode in cmd %p",
1312 1312 (void *)cmd);
1313 1313 return (EINVAL);
1314 1314
1315 1315 case NVME_CQE_SC_GEN_INV_FLD:
1316 1316 /* Invalid Field in Command */
1317 1317 if (!cmd->nc_dontpanic)
1318 1318 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1319 1319 "programming error: invalid field in cmd %p",
1320 1320 (void *)cmd);
1321 1321 return (EIO);
1322 1322
1323 1323 case NVME_CQE_SC_GEN_ID_CNFL:
1324 1324 /* Command ID Conflict */
1325 1325 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1326 1326 "cmd ID conflict in cmd %p", (void *)cmd);
1327 1327 return (0);
1328 1328
1329 1329 case NVME_CQE_SC_GEN_INV_NS:
1330 1330 /* Invalid Namespace or Format */
1331 1331 if (!cmd->nc_dontpanic)
1332 1332 dev_err(cmd->nc_nvme->n_dip, CE_PANIC,
1333 1333 "programming error: invalid NS/format in cmd %p",
1334 1334 (void *)cmd);
1335 1335 return (EINVAL);
1336 1336
1337 1337 case NVME_CQE_SC_GEN_NVM_LBA_RANGE:
1338 1338 /* LBA Out Of Range */
1339 1339 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1340 1340 "LBA out of range in cmd %p", (void *)cmd);
1341 1341 return (0);
1342 1342
1343 1343 /*
1344 1344 * Non-fatal errors, handle gracefully.
1345 1345 */
1346 1346 case NVME_CQE_SC_GEN_DATA_XFR_ERR:
1347 1347 /* Data Transfer Error (DMA) */
1348 1348 /* TODO: post ereport */
1349 1349 atomic_inc_32(&cmd->nc_nvme->n_data_xfr_err);
1350 1350 if (cmd->nc_xfer != NULL)
1351 1351 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1352 1352 return (EIO);
1353 1353
1354 1354 case NVME_CQE_SC_GEN_INTERNAL_ERR:
1355 1355 /*
1356 1356 * Internal Error. The spec (v1.0, section 4.5.1.2) says
1357 1357 * detailed error information is returned as async event,
1358 1358 * so we pretty much ignore the error here and handle it
1359 1359 * in the async event handler.
1360 1360 */
1361 1361 atomic_inc_32(&cmd->nc_nvme->n_internal_err);
1362 1362 if (cmd->nc_xfer != NULL)
1363 1363 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1364 1364 return (EIO);
1365 1365
1366 1366 case NVME_CQE_SC_GEN_ABORT_REQUEST:
1367 1367 /*
1368 1368 * Command Abort Requested. This normally happens only when a
1369 1369 * command times out.
1370 1370 */
1371 1371 /* TODO: post ereport or change blkdev to handle this? */
1372 1372 atomic_inc_32(&cmd->nc_nvme->n_abort_rq_err);
1373 1373 return (ECANCELED);
1374 1374
1375 1375 case NVME_CQE_SC_GEN_ABORT_PWRLOSS:
1376 1376 /* Command Aborted due to Power Loss Notification */
1377 1377 ddi_fm_service_impact(cmd->nc_nvme->n_dip, DDI_SERVICE_LOST);
1378 1378 cmd->nc_nvme->n_dead = B_TRUE;
1379 1379 return (EIO);
1380 1380
1381 1381 case NVME_CQE_SC_GEN_ABORT_SQ_DEL:
1382 1382 /* Command Aborted due to SQ Deletion */
1383 1383 atomic_inc_32(&cmd->nc_nvme->n_abort_sq_del);
1384 1384 return (EIO);
1385 1385
1386 1386 case NVME_CQE_SC_GEN_NVM_CAP_EXC:
1387 1387 /* Capacity Exceeded */
1388 1388 atomic_inc_32(&cmd->nc_nvme->n_nvm_cap_exc);
1389 1389 if (cmd->nc_xfer != NULL)
1390 1390 bd_error(cmd->nc_xfer, BD_ERR_MEDIA);
1391 1391 return (EIO);
1392 1392
1393 1393 case NVME_CQE_SC_GEN_NVM_NS_NOTRDY:
1394 1394 /* Namespace Not Ready */
1395 1395 atomic_inc_32(&cmd->nc_nvme->n_nvm_ns_notrdy);
1396 1396 if (cmd->nc_xfer != NULL)
1397 1397 bd_error(cmd->nc_xfer, BD_ERR_NTRDY);
1398 1398 return (EIO);
1399 1399
1400 1400 default:
1401 1401 return (nvme_check_unknown_cmd_status(cmd));
1402 1402 }
1403 1403 }
1404 1404
1405 1405 static int
1406 1406 nvme_check_specific_cmd_status(nvme_cmd_t *cmd)
1407 1407 {
1408 1408 nvme_cqe_t *cqe = &cmd->nc_cqe;
1409 1409
1410 1410 switch (cqe->cqe_sf.sf_sc) {
1411 1411 case NVME_CQE_SC_SPC_INV_CQ:
1412 1412 /* Completion Queue Invalid */
1413 1413 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE);
1414 1414 atomic_inc_32(&cmd->nc_nvme->n_inv_cq_err);
1415 1415 return (EINVAL);
1416 1416
1417 1417 case NVME_CQE_SC_SPC_INV_QID:
1418 1418 /* Invalid Queue Identifier */
1419 1419 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
1420 1420 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_SQUEUE ||
1421 1421 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE ||
1422 1422 cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
1423 1423 atomic_inc_32(&cmd->nc_nvme->n_inv_qid_err);
1424 1424 return (EINVAL);
1425 1425
1426 1426 case NVME_CQE_SC_SPC_MAX_QSZ_EXC:
1427 1427 /* Max Queue Size Exceeded */
1428 1428 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_SQUEUE ||
1429 1429 cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
1430 1430 atomic_inc_32(&cmd->nc_nvme->n_max_qsz_exc);
1431 1431 return (EINVAL);
1432 1432
1433 1433 case NVME_CQE_SC_SPC_ABRT_CMD_EXC:
1434 1434 /* Abort Command Limit Exceeded */
1435 1435 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT);
1436 1436 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1437 1437 "abort command limit exceeded in cmd %p", (void *)cmd);
1438 1438 return (0);
1439 1439
1440 1440 case NVME_CQE_SC_SPC_ASYNC_EVREQ_EXC:
1441 1441 /* Async Event Request Limit Exceeded */
1442 1442 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_ASYNC_EVENT);
1443 1443 dev_err(cmd->nc_nvme->n_dip, CE_PANIC, "programming error: "
1444 1444 "async event request limit exceeded in cmd %p",
1445 1445 (void *)cmd);
1446 1446 return (0);
1447 1447
1448 1448 case NVME_CQE_SC_SPC_INV_INT_VECT:
1449 1449 /* Invalid Interrupt Vector */
1450 1450 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_CREATE_CQUEUE);
1451 1451 atomic_inc_32(&cmd->nc_nvme->n_inv_int_vect);
1452 1452 return (EINVAL);
1453 1453
1454 1454 case NVME_CQE_SC_SPC_INV_LOG_PAGE:
1455 1455 /* Invalid Log Page */
1456 1456 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_GET_LOG_PAGE);
1457 1457 atomic_inc_32(&cmd->nc_nvme->n_inv_log_page);
1458 1458 return (EINVAL);
1459 1459
1460 1460 case NVME_CQE_SC_SPC_INV_FORMAT:
1461 1461 /* Invalid Format */
1462 1462 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_FORMAT);
1463 1463 atomic_inc_32(&cmd->nc_nvme->n_inv_format);
1464 1464 if (cmd->nc_xfer != NULL)
1465 1465 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1466 1466 return (EINVAL);
1467 1467
1468 1468 case NVME_CQE_SC_SPC_INV_Q_DEL:
1469 1469 /* Invalid Queue Deletion */
1470 1470 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_DELETE_CQUEUE);
1471 1471 atomic_inc_32(&cmd->nc_nvme->n_inv_q_del);
1472 1472 return (EINVAL);
1473 1473
1474 1474 case NVME_CQE_SC_SPC_NVM_CNFL_ATTR:
1475 1475 /* Conflicting Attributes */
1476 1476 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_DSET_MGMT ||
1477 1477 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
1478 1478 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1479 1479 atomic_inc_32(&cmd->nc_nvme->n_cnfl_attr);
1480 1480 if (cmd->nc_xfer != NULL)
1481 1481 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1482 1482 return (EINVAL);
1483 1483
1484 1484 case NVME_CQE_SC_SPC_NVM_INV_PROT:
1485 1485 /* Invalid Protection Information */
1486 1486 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_COMPARE ||
1487 1487 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_READ ||
1488 1488 cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1489 1489 atomic_inc_32(&cmd->nc_nvme->n_inv_prot);
1490 1490 if (cmd->nc_xfer != NULL)
1491 1491 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1492 1492 return (EINVAL);
1493 1493
1494 1494 case NVME_CQE_SC_SPC_NVM_READONLY:
1495 1495 /* Write to Read Only Range */
1496 1496 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_NVM_WRITE);
1497 1497 atomic_inc_32(&cmd->nc_nvme->n_readonly);
1498 1498 if (cmd->nc_xfer != NULL)
1499 1499 bd_error(cmd->nc_xfer, BD_ERR_ILLRQ);
1500 1500 return (EROFS);
1501 1501
1502 1502 case NVME_CQE_SC_SPC_INV_FW_SLOT:
1503 1503 /* Invalid Firmware Slot */
1504 1504 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1505 1505 return (EINVAL);
1506 1506
1507 1507 case NVME_CQE_SC_SPC_INV_FW_IMG:
1508 1508 /* Invalid Firmware Image */
1509 1509 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1510 1510 return (EINVAL);
1511 1511
1512 1512 case NVME_CQE_SC_SPC_FW_RESET:
1513 1513 /* Conventional Reset Required */
1514 1514 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1515 1515 return (0);
1516 1516
1517 1517 case NVME_CQE_SC_SPC_FW_NSSR:
1518 1518 /* NVMe Subsystem Reset Required */
1519 1519 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1520 1520 return (0);
1521 1521
1522 1522 case NVME_CQE_SC_SPC_FW_NEXT_RESET:
1523 1523 /* Activation Requires Reset */
1524 1524 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1525 1525 return (0);
1526 1526
1527 1527 case NVME_CQE_SC_SPC_FW_MTFA:
1528 1528 /* Activation Requires Maximum Time Violation */
1529 1529 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1530 1530 return (EAGAIN);
1531 1531
1532 1532 case NVME_CQE_SC_SPC_FW_PROHIBITED:
1533 1533 /* Activation Prohibited */
1534 1534 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_ACTIVATE);
1535 1535 return (EINVAL);
1536 1536
1537 1537 case NVME_CQE_SC_SPC_FW_OVERLAP:
1538 1538 /* Overlapping Firmware Ranges */
1539 1539 ASSERT(cmd->nc_sqe.sqe_opc == NVME_OPC_FW_IMAGE_LOAD);
1540 1540 return (EINVAL);
1541 1541
1542 1542 default:
1543 1543 return (nvme_check_unknown_cmd_status(cmd));
1544 1544 }
1545 1545 }
1546 1546
1547 1547 static inline int
1548 1548 nvme_check_cmd_status(nvme_cmd_t *cmd)
1549 1549 {
1550 1550 nvme_cqe_t *cqe = &cmd->nc_cqe;
1551 1551
1552 1552 /*
1553 1553 * Take a shortcut if the controller is dead, or if
1554 1554 * command status indicates no error.
1555 1555 */
1556 1556 if (cmd->nc_nvme->n_dead)
1557 1557 return (EIO);
1558 1558
1559 1559 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1560 1560 cqe->cqe_sf.sf_sc == NVME_CQE_SC_GEN_SUCCESS)
1561 1561 return (0);
1562 1562
1563 1563 if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC)
1564 1564 return (nvme_check_generic_cmd_status(cmd));
1565 1565 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_SPECIFIC)
1566 1566 return (nvme_check_specific_cmd_status(cmd));
1567 1567 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_INTEGRITY)
1568 1568 return (nvme_check_integrity_cmd_status(cmd));
1569 1569 else if (cqe->cqe_sf.sf_sct == NVME_CQE_SCT_VENDOR)
1570 1570 return (nvme_check_vendor_cmd_status(cmd));
1571 1571
1572 1572 return (nvme_check_unknown_cmd_status(cmd));
1573 1573 }
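/*
 * For illustration, a sketch of how the status field dispatched on above
 * is packed into completion queue entry dword 3, per my reading of NVMe
 * 1.2 section 4.6.1. The driver itself uses the nvme_cqe_sf_t bitfield
 * accessors rather than open-coded shifts:
 *
 *	sc  = (dw3 >> 17) & 0xff;	-- Status Code
 *	sct = (dw3 >> 25) & 0x7;	-- Status Code Type
 *	m   = (dw3 >> 30) & 0x1;	-- More (log page has details)
 *	dnr = (dw3 >> 31) & 0x1;	-- Do Not Retry
 */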
1574 1574
1575 1575 static int
1576 1576 nvme_abort_cmd(nvme_cmd_t *abort_cmd, uint_t sec)
1577 1577 {
1578 1578 nvme_t *nvme = abort_cmd->nc_nvme;
1579 1579 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1580 1580 nvme_abort_cmd_t ac = { 0 };
1581 1581 int ret = 0;
1582 1582
1583 1583 sema_p(&nvme->n_abort_sema);
1584 1584
1585 1585 ac.b.ac_cid = abort_cmd->nc_sqe.sqe_cid;
1586 1586 ac.b.ac_sqid = abort_cmd->nc_sqid;
1587 1587
1588 1588 cmd->nc_sqid = 0;
1589 1589 cmd->nc_sqe.sqe_opc = NVME_OPC_ABORT;
1590 1590 cmd->nc_callback = nvme_wakeup_cmd;
1591 1591 cmd->nc_sqe.sqe_cdw10 = ac.r;
1592 1592
1593 1593 /*
1594 1594 * Send the ABORT to the hardware. The ABORT command will return _after_
1595 1595 * the aborted command has completed (aborted or otherwise), but since
1596 1596 * we still hold the aborted command's mutex its callback hasn't been
1597 1597 * processed yet.
1598 1598 */
1599 1599 nvme_admin_cmd(cmd, sec);
1600 1600 sema_v(&nvme->n_abort_sema);
1601 1601
1602 1602 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1603 1603 dev_err(nvme->n_dip, CE_WARN,
1604 1604 "!ABORT failed with sct = %x, sc = %x",
1605 1605 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1606 1606 atomic_inc_32(&nvme->n_abort_failed);
1607 1607 } else {
1608 1608 dev_err(nvme->n_dip, CE_WARN,
1609 1609 "!ABORT of command %d/%d %ssuccessful",
1610 1610 abort_cmd->nc_sqe.sqe_cid, abort_cmd->nc_sqid,
1611 1611 cmd->nc_cqe.cqe_dw0 & 1 ? "un" : "");
1612 1612 if ((cmd->nc_cqe.cqe_dw0 & 1) == 0)
1613 1613 atomic_inc_32(&nvme->n_cmd_aborted);
1614 1614 }
1615 1615
1616 1616 nvme_free_cmd(cmd);
1617 1617 return (ret);
1618 1618 }
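/*
 * A sketch of the ABORT dword 10 encoding that nvme_abort_cmd_t builds
 * above, assuming the layout from NVMe 1.2 section 5.1 (SQID in the low
 * 16 bits, CID in the high 16 bits):
 *
 *	cdw10 = (uint32_t)cid << 16 | sqid;
 *
 * Completion dword 0 bit 0 is set when the command could NOT be aborted,
 * which is why (cqe_dw0 & 1) == 0 counts as a successful abort above.
 */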
1619 1619
1620 1620 /*
1621 1621 * nvme_wait_cmd -- wait for command completion or timeout
1622 1622 *
1623 1623 * In case of a serious error or a timeout of the abort command the hardware
1624 1624 * will be declared dead and FMA will be notified.
1625 1625 */
1626 1626 static void
1627 1627 nvme_wait_cmd(nvme_cmd_t *cmd, uint_t sec)
1628 1628 {
1629 1629 clock_t timeout = ddi_get_lbolt() + drv_usectohz(sec * MICROSEC);
1630 1630 nvme_t *nvme = cmd->nc_nvme;
1631 1631 nvme_reg_csts_t csts;
1632 1632 nvme_qpair_t *qp;
1633 1633
1634 1634 ASSERT(mutex_owned(&cmd->nc_mutex));
1635 1635
1636 1636 while (!cmd->nc_completed) {
1637 1637 if (cv_timedwait(&cmd->nc_cv, &cmd->nc_mutex, timeout) == -1)
1638 1638 break;
1639 1639 }
1640 1640
1641 1641 if (cmd->nc_completed)
1642 1642 return;
1643 1643
1644 1644 /*
1645 1645 * The command timed out.
1646 1646 *
1647 1647 * Check controller for fatal status, any errors associated with the
1648 1648 * register or DMA handle, or for a double timeout (abort command timed
1649 1649 * out). If necessary log a warning and call FMA.
1650 1650 */
1651 1651 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
1652 1652 dev_err(nvme->n_dip, CE_WARN, "!command %d/%d timeout, "
1653 1653 "OPC = %x, CFS = %d", cmd->nc_sqe.sqe_cid, cmd->nc_sqid,
1654 1654 cmd->nc_sqe.sqe_opc, csts.b.csts_cfs);
1655 1655 atomic_inc_32(&nvme->n_cmd_timeout);
1656 1656
1657 1657 if (csts.b.csts_cfs ||
1658 1658 nvme_check_regs_hdl(nvme) ||
1659 1659 nvme_check_dma_hdl(cmd->nc_dma) ||
1660 1660 cmd->nc_sqe.sqe_opc == NVME_OPC_ABORT) {
1661 1661 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1662 1662 nvme->n_dead = B_TRUE;
1663 1663 } else if (nvme_abort_cmd(cmd, sec) == 0) {
1664 1664 /*
1665 1665 * If the abort succeeded the command should complete
1666 1666 * immediately with an appropriate status.
1667 1667 */
1668 1668 while (!cmd->nc_completed)
1669 1669 cv_wait(&cmd->nc_cv, &cmd->nc_mutex);
1670 1670
1671 1671 return;
1672 1672 }
1673 1673
1674 1674 qp = nvme->n_ioq[cmd->nc_sqid];
1675 1675
1676 1676 mutex_enter(&qp->nq_mutex);
1677 1677 (void) nvme_unqueue_cmd(nvme, qp, cmd->nc_sqe.sqe_cid);
1678 1678 mutex_exit(&qp->nq_mutex);
1679 1679
1680 1680 /*
1681 1681 * As we don't know what the presumed dead hardware might still do with
1682 1682 * the DMA memory, we'll put the command on the lost commands list if it
1683 1683 * has any DMA memory.
1684 1684 */
1685 1685 if (cmd->nc_dma != NULL) {
1686 1686 mutex_enter(&nvme_lc_mutex);
1687 1687 list_insert_head(&nvme_lost_cmds, cmd);
1688 1688 mutex_exit(&nvme_lc_mutex);
1689 1689 }
1690 1690 }
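/*
 * Note on the deadline handling above: cv_timedwait() takes an absolute
 * time in lbolt ticks, so computing the deadline once before the loop
 * enforces an overall timeout rather than restarting the clock on every
 * spurious wakeup. For example, with sec = 10 and hz = 100 the deadline
 * is ddi_get_lbolt() + 1000 ticks.
 */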
1691 1691
1692 1692 static void
1693 1693 nvme_wakeup_cmd(void *arg)
1694 1694 {
1695 1695 nvme_cmd_t *cmd = arg;
1696 1696
1697 1697 mutex_enter(&cmd->nc_mutex);
1698 1698 cmd->nc_completed = B_TRUE;
1699 1699 cv_signal(&cmd->nc_cv);
1700 1700 mutex_exit(&cmd->nc_mutex);
1701 1701 }
1702 1702
1703 1703 static void
1704 1704 nvme_async_event_task(void *arg)
1705 1705 {
1706 1706 nvme_cmd_t *cmd = arg;
1707 1707 nvme_t *nvme = cmd->nc_nvme;
1708 1708 nvme_error_log_entry_t *error_log = NULL;
1709 1709 nvme_health_log_t *health_log = NULL;
1710 1710 size_t logsize = 0;
1711 1711 nvme_async_event_t event;
1712 1712
1713 1713 /*
1714 1714 * Check for errors associated with the async request itself. The only
1715 1715 * command-specific error is "async event limit exceeded", which
1716 1716 * indicates a programming error in the driver and causes a panic in
1717 1717 * nvme_check_cmd_status().
1718 1718 *
1719 1719 * Other possible errors are various scenarios where the async request
1720 1720 * was aborted, or internal errors in the device. Internal errors are
1721 1721 * reported to FMA, the command aborts need no special handling here.
1722 1722 *
1723 1723 	 * Finally, at least qemu's NVMe emulation does not support async
1724 1724 	 * events and will return NVME_CQE_SC_GEN_INV_OPC | DNR. In that
1725 1725 	 * case we avoid posting any further async event requests.
1726 1726 */
1727 1727
1728 1728 if (nvme_check_cmd_status(cmd) != 0) {
1729 1729 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
1730 1730 "!async event request returned failure, sct = %x, "
1731 1731 "sc = %x, dnr = %d, m = %d", cmd->nc_cqe.cqe_sf.sf_sct,
1732 1732 cmd->nc_cqe.cqe_sf.sf_sc, cmd->nc_cqe.cqe_sf.sf_dnr,
1733 1733 cmd->nc_cqe.cqe_sf.sf_m);
1734 1734
1735 1735 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1736 1736 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INTERNAL_ERR) {
1737 1737 cmd->nc_nvme->n_dead = B_TRUE;
1738 1738 ddi_fm_service_impact(cmd->nc_nvme->n_dip,
1739 1739 DDI_SERVICE_LOST);
1740 1740 }
1741 1741
1742 1742 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
1743 1743 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_OPC &&
1744 1744 cmd->nc_cqe.cqe_sf.sf_dnr == 1) {
1745 1745 nvme->n_async_event_supported = B_FALSE;
1746 1746 }
1747 1747
1748 1748 nvme_free_cmd(cmd);
1749 1749 return;
1750 1750 }
1751 1751
1752 1752
1753 1753 event.r = cmd->nc_cqe.cqe_dw0;
1754 1754
1755 1755 /* Clear CQE and re-submit the async request. */
1756 1756 bzero(&cmd->nc_cqe, sizeof (nvme_cqe_t));
1757 1757 nvme_submit_admin_cmd(nvme->n_adminq, cmd);
1758 1758
1759 1759 switch (event.b.ae_type) {
1760 1760 case NVME_ASYNC_TYPE_ERROR:
1761 1761 if (event.b.ae_logpage == NVME_LOGPAGE_ERROR) {
1762 1762 (void) nvme_get_logpage(nvme, B_FALSE,
1763 1763 (void **)&error_log, &logsize, event.b.ae_logpage);
1764 1764 } else {
1765 1765 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1766 1766 "async event reply: %d", event.b.ae_logpage);
1767 1767 atomic_inc_32(&nvme->n_wrong_logpage);
1768 1768 }
1769 1769
1770 1770 switch (event.b.ae_info) {
1771 1771 case NVME_ASYNC_ERROR_INV_SQ:
1772 1772 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1773 1773 "invalid submission queue");
1774 1774 return;
1775 1775
1776 1776 case NVME_ASYNC_ERROR_INV_DBL:
1777 1777 dev_err(nvme->n_dip, CE_PANIC, "programming error: "
1778 1778 "invalid doorbell write value");
1779 1779 return;
1780 1780
1781 1781 case NVME_ASYNC_ERROR_DIAGFAIL:
1782 1782 dev_err(nvme->n_dip, CE_WARN, "!diagnostic failure");
1783 1783 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1784 1784 nvme->n_dead = B_TRUE;
1785 1785 atomic_inc_32(&nvme->n_diagfail_event);
1786 1786 break;
1787 1787
1788 1788 case NVME_ASYNC_ERROR_PERSISTENT:
1789 1789 dev_err(nvme->n_dip, CE_WARN, "!persistent internal "
1790 1790 "device error");
1791 1791 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
1792 1792 nvme->n_dead = B_TRUE;
1793 1793 atomic_inc_32(&nvme->n_persistent_event);
1794 1794 break;
1795 1795
1796 1796 case NVME_ASYNC_ERROR_TRANSIENT:
1797 1797 dev_err(nvme->n_dip, CE_WARN, "!transient internal "
1798 1798 "device error");
1799 1799 /* TODO: send ereport */
1800 1800 atomic_inc_32(&nvme->n_transient_event);
1801 1801 break;
1802 1802
1803 1803 case NVME_ASYNC_ERROR_FW_LOAD:
1804 1804 dev_err(nvme->n_dip, CE_WARN,
1805 1805 "!firmware image load error");
1806 1806 atomic_inc_32(&nvme->n_fw_load_event);
1807 1807 break;
1808 1808 }
1809 1809 break;
1810 1810
1811 1811 case NVME_ASYNC_TYPE_HEALTH:
1812 1812 if (event.b.ae_logpage == NVME_LOGPAGE_HEALTH) {
1813 1813 (void) nvme_get_logpage(nvme, B_FALSE,
1814 1814 (void **)&health_log, &logsize, event.b.ae_logpage,
1815 1815 -1);
1816 1816 } else {
1817 1817 dev_err(nvme->n_dip, CE_WARN, "!wrong logpage in "
1818 1818 "async event reply: %d", event.b.ae_logpage);
1819 1819 atomic_inc_32(&nvme->n_wrong_logpage);
1820 1820 }
1821 1821
1822 1822 switch (event.b.ae_info) {
1823 1823 case NVME_ASYNC_HEALTH_RELIABILITY:
1824 1824 dev_err(nvme->n_dip, CE_WARN,
1825 1825 "!device reliability compromised");
1826 1826 /* TODO: send ereport */
1827 1827 atomic_inc_32(&nvme->n_reliability_event);
1828 1828 break;
1829 1829
1830 1830 case NVME_ASYNC_HEALTH_TEMPERATURE:
1831 1831 dev_err(nvme->n_dip, CE_WARN,
1832 1832 "!temperature above threshold");
1833 1833 /* TODO: send ereport */
1834 1834 atomic_inc_32(&nvme->n_temperature_event);
1835 1835 break;
1836 1836
1837 1837 case NVME_ASYNC_HEALTH_SPARE:
1838 1838 dev_err(nvme->n_dip, CE_WARN,
1839 1839 "!spare space below threshold");
1840 1840 /* TODO: send ereport */
1841 1841 atomic_inc_32(&nvme->n_spare_event);
1842 1842 break;
1843 1843 }
1844 1844 break;
1845 1845
1846 1846 case NVME_ASYNC_TYPE_VENDOR:
1847 1847 dev_err(nvme->n_dip, CE_WARN, "!vendor specific async event "
1848 1848 "received, info = %x, logpage = %x", event.b.ae_info,
1849 1849 event.b.ae_logpage);
1850 1850 atomic_inc_32(&nvme->n_vendor_event);
1851 1851 break;
1852 1852
1853 1853 default:
1854 1854 dev_err(nvme->n_dip, CE_WARN, "!unknown async event received, "
1855 1855 "type = %x, info = %x, logpage = %x", event.b.ae_type,
1856 1856 event.b.ae_info, event.b.ae_logpage);
1857 1857 atomic_inc_32(&nvme->n_unknown_event);
1858 1858 break;
1859 1859 }
1860 1860
1861 1861 if (error_log)
1862 1862 kmem_free(error_log, logsize);
1863 1863
1864 1864 if (health_log)
1865 1865 kmem_free(health_log, logsize);
1866 1866 }
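/*
 * For reference, the dword 0 layout decoded through nvme_async_event_t
 * above, per my reading of the NVMe 1.2 asynchronous event completion
 * definition (shown as shifts only for illustration):
 *
 *	ae_type    = dw0 & 0x7;			-- event type
 *	ae_info    = (dw0 >> 8) & 0xff;		-- event information
 *	ae_logpage = (dw0 >> 16) & 0xff;	-- associated log page
 */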
1867 1867
1868 1868 static void
1869 1869 nvme_admin_cmd(nvme_cmd_t *cmd, int sec)
1870 1870 {
1871 1871 mutex_enter(&cmd->nc_mutex);
1872 1872 nvme_submit_admin_cmd(cmd->nc_nvme->n_adminq, cmd);
1873 1873 nvme_wait_cmd(cmd, sec);
1874 1874 mutex_exit(&cmd->nc_mutex);
1875 1875 }
1876 1876
1877 1877 static void
1878 1878 nvme_async_event(nvme_t *nvme)
1879 1879 {
1880 1880 nvme_cmd_t *cmd;
1881 1881
1882 1882 cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1883 1883 cmd->nc_sqid = 0;
1884 1884 cmd->nc_sqe.sqe_opc = NVME_OPC_ASYNC_EVENT;
1885 1885 cmd->nc_callback = nvme_async_event_task;
1886 1886 cmd->nc_dontpanic = B_TRUE;
1887 1887
1888 1888 nvme_submit_admin_cmd(nvme->n_adminq, cmd);
1889 1889 }
1890 1890
1891 1891 static int
1892 1892 nvme_format_nvm(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t lbaf,
1893 1893 boolean_t ms, uint8_t pi, boolean_t pil, uint8_t ses)
1894 1894 {
1895 1895 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1896 1896 nvme_format_nvm_t format_nvm = { 0 };
1897 1897 int ret;
1898 1898
1899 1899 format_nvm.b.fm_lbaf = lbaf & 0xf;
1900 1900 format_nvm.b.fm_ms = ms ? 1 : 0;
1901 1901 format_nvm.b.fm_pi = pi & 0x7;
1902 1902 format_nvm.b.fm_pil = pil ? 1 : 0;
1903 1903 format_nvm.b.fm_ses = ses & 0x7;
1904 1904
1905 1905 cmd->nc_sqid = 0;
1906 1906 cmd->nc_callback = nvme_wakeup_cmd;
1907 1907 cmd->nc_sqe.sqe_nsid = nsid;
1908 1908 cmd->nc_sqe.sqe_opc = NVME_OPC_NVM_FORMAT;
1909 1909 cmd->nc_sqe.sqe_cdw10 = format_nvm.r;
1910 1910
1911 1911 /*
1912 1912 * Some devices like Samsung SM951 don't allow formatting of all
1913 1913 * namespaces in one command. Handle that gracefully.
1914 1914 */
1915 1915 if (nsid == (uint32_t)-1)
1916 1916 cmd->nc_dontpanic = B_TRUE;
1917 1917 /*
1918 1918 * If this format request was initiated by the user, then don't allow a
1919 1919 * programmer error to panic the system.
1920 1920 */
1921 1921 if (user)
1922 1922 cmd->nc_dontpanic = B_TRUE;
1923 1923
1924 1924 nvme_admin_cmd(cmd, nvme_format_cmd_timeout);
1925 1925
1926 1926 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
1927 1927 dev_err(nvme->n_dip, CE_WARN,
1928 1928 "!FORMAT failed with sct = %x, sc = %x",
1929 1929 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
1930 1930 }
1931 1931
1932 1932 nvme_free_cmd(cmd);
1933 1933 return (ret);
1934 1934 }
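/*
 * A sketch of the FORMAT NVM dword 10 encoding built via
 * nvme_format_nvm_t above, assuming the field layout from NVMe 1.2
 * section 5.13 (LBAF 3:0, MS 4, PI 7:5, PIL 8, SES 11:9):
 *
 *	cdw10 = (ses & 0x7) << 9 | (pil ? 1 : 0) << 8 |
 *	    (pi & 0x7) << 5 | (ms ? 1 : 0) << 4 | (lbaf & 0xf);
 */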
1935 1935
1936 1936 static int
1937 1937 nvme_get_logpage(nvme_t *nvme, boolean_t user, void **buf, size_t *bufsize,
1938 1938 uint8_t logpage, ...)
1939 1939 {
1940 1940 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
1941 1941 nvme_getlogpage_t getlogpage = { 0 };
1942 1942 va_list ap;
1943 1943 int ret;
1944 1944
1945 1945 va_start(ap, logpage);
1946 1946
1947 1947 cmd->nc_sqid = 0;
1948 1948 cmd->nc_callback = nvme_wakeup_cmd;
1949 1949 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_LOG_PAGE;
1950 1950
1951 1951 if (user)
1952 1952 cmd->nc_dontpanic = B_TRUE;
1953 1953
1954 1954 getlogpage.b.lp_lid = logpage;
1955 1955
1956 1956 switch (logpage) {
1957 1957 case NVME_LOGPAGE_ERROR:
1958 1958 cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
1959 1959 /*
1960 1960 		 * The GET LOG PAGE command can use at most 2 pages to return
1961 1961 		 * data; PRP lists are not supported.
1962 1962 */
1963 1963 *bufsize = MIN(2 * nvme->n_pagesize,
1964 1964 nvme->n_error_log_len * sizeof (nvme_error_log_entry_t));
1965 1965 break;
1966 1966
1967 1967 case NVME_LOGPAGE_HEALTH:
1968 1968 cmd->nc_sqe.sqe_nsid = va_arg(ap, uint32_t);
1969 1969 *bufsize = sizeof (nvme_health_log_t);
1970 1970 break;
1971 1971
1972 1972 case NVME_LOGPAGE_FWSLOT:
1973 1973 cmd->nc_sqe.sqe_nsid = (uint32_t)-1;
1974 1974 *bufsize = sizeof (nvme_fwslot_log_t);
1975 1975 break;
1976 1976
1977 1977 default:
1978 1978 dev_err(nvme->n_dip, CE_WARN, "!unknown log page requested: %d",
1979 1979 logpage);
1980 1980 atomic_inc_32(&nvme->n_unknown_logpage);
1981 1981 ret = EINVAL;
1982 1982 goto fail;
1983 1983 }
1984 1984
1985 1985 va_end(ap);
1986 1986
1987 1987 getlogpage.b.lp_numd = *bufsize / sizeof (uint32_t) - 1;
1988 1988
1989 1989 cmd->nc_sqe.sqe_cdw10 = getlogpage.r;
1990 1990
1991 1991 if (nvme_zalloc_dma(nvme, *bufsize,
1992 1992 DDI_DMA_READ, &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
1993 1993 dev_err(nvme->n_dip, CE_WARN,
1994 1994 "!nvme_zalloc_dma failed for GET LOG PAGE");
1995 1995 ret = ENOMEM;
1996 1996 goto fail;
1997 1997 }
1998 1998
1999 1999 if (cmd->nc_dma->nd_ncookie > 2) {
2000 2000 dev_err(nvme->n_dip, CE_WARN,
2001 2001 "!too many DMA cookies for GET LOG PAGE");
2002 2002 atomic_inc_32(&nvme->n_too_many_cookies);
2003 2003 ret = ENOMEM;
2004 2004 goto fail;
2005 2005 }
2006 2006
2007 2007 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
2008 2008 if (cmd->nc_dma->nd_ncookie > 1) {
2009 2009 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2010 2010 &cmd->nc_dma->nd_cookie);
2011 2011 cmd->nc_sqe.sqe_dptr.d_prp[1] =
2012 2012 cmd->nc_dma->nd_cookie.dmac_laddress;
2013 2013 }
2014 2014
2015 2015 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2016 2016
2017 2017 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2018 2018 dev_err(nvme->n_dip, CE_WARN,
2019 2019 "!GET LOG PAGE failed with sct = %x, sc = %x",
2020 2020 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2021 2021 goto fail;
2022 2022 }
2023 2023
2024 2024 *buf = kmem_alloc(*bufsize, KM_SLEEP);
2025 2025 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
2026 2026
2027 2027 fail:
2028 2028 nvme_free_cmd(cmd);
2029 2029
2030 2030 return (ret);
2031 2031 }
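/*
 * Worked example for the NUMD calculation above: NUMD is a zero's based
 * dword count, so for a 512-byte log page lp_numd = 512 / 4 - 1 = 127.
 * The two-cookie check exists because without PRP list support a single
 * request can span at most two physical pages (PRP entries 1 and 2).
 */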
2032 2032
2033 2033 static int
2034 2034 nvme_identify(nvme_t *nvme, boolean_t user, uint32_t nsid, void **buf)
2035 2035 {
2036 2036 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2037 2037 int ret;
2038 2038
2039 2039 if (buf == NULL)
2040 2040 return (EINVAL);
2041 2041
2042 2042 cmd->nc_sqid = 0;
2043 2043 cmd->nc_callback = nvme_wakeup_cmd;
2044 2044 cmd->nc_sqe.sqe_opc = NVME_OPC_IDENTIFY;
2045 2045 cmd->nc_sqe.sqe_nsid = nsid;
2046 2046 cmd->nc_sqe.sqe_cdw10 = nsid ? NVME_IDENTIFY_NSID : NVME_IDENTIFY_CTRL;
2047 2047
2048 2048 if (nvme_zalloc_dma(nvme, NVME_IDENTIFY_BUFSIZE, DDI_DMA_READ,
2049 2049 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2050 2050 dev_err(nvme->n_dip, CE_WARN,
2051 2051 "!nvme_zalloc_dma failed for IDENTIFY");
2052 2052 ret = ENOMEM;
2053 2053 goto fail;
2054 2054 }
2055 2055
2056 2056 if (cmd->nc_dma->nd_ncookie > 2) {
2057 2057 dev_err(nvme->n_dip, CE_WARN,
2058 2058 "!too many DMA cookies for IDENTIFY");
2059 2059 atomic_inc_32(&nvme->n_too_many_cookies);
2060 2060 ret = ENOMEM;
2061 2061 goto fail;
2062 2062 }
2063 2063
2064 2064 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
2065 2065 if (cmd->nc_dma->nd_ncookie > 1) {
2066 2066 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2067 2067 &cmd->nc_dma->nd_cookie);
2068 2068 cmd->nc_sqe.sqe_dptr.d_prp[1] =
2069 2069 cmd->nc_dma->nd_cookie.dmac_laddress;
2070 2070 }
2071 2071
2072 2072 if (user)
2073 2073 cmd->nc_dontpanic = B_TRUE;
2074 2074
2075 2075 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2076 2076
2077 2077 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2078 2078 dev_err(nvme->n_dip, CE_WARN,
2079 2079 "!IDENTIFY failed with sct = %x, sc = %x",
2080 2080 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2081 2081 goto fail;
2082 2082 }
2083 2083
2084 2084 *buf = kmem_alloc(NVME_IDENTIFY_BUFSIZE, KM_SLEEP);
2085 2085 bcopy(cmd->nc_dma->nd_memp, *buf, NVME_IDENTIFY_BUFSIZE);
2086 2086
2087 2087 fail:
2088 2088 nvme_free_cmd(cmd);
2089 2089
2090 2090 return (ret);
2091 2091 }
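/*
 * Usage sketch: passing nsid == 0 as above returns the 4096-byte
 * identify controller data structure, while a non-zero nsid returns the
 * identify namespace data structure for that namespace, e.g.:
 *
 *	nvme_identify_nsid_t *idns;
 *	if (nvme_identify(nvme, B_FALSE, 1, (void **)&idns) == 0) {
 *		-- idns now holds a copy of namespace 1's identify data
 *		kmem_free(idns, NVME_IDENTIFY_BUFSIZE);
 *	}
 */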
2092 2092
2093 2093 static int
2094 2094 nvme_set_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2095 2095 uint32_t val, uint32_t *res)
2096 2096 {
2097 2097 _NOTE(ARGUNUSED(nsid));
2098 2098 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2099 2099 int ret = EINVAL;
2100 2100
2101 2101 ASSERT(res != NULL);
2102 2102
2103 2103 cmd->nc_sqid = 0;
2104 2104 cmd->nc_callback = nvme_wakeup_cmd;
2105 2105 cmd->nc_sqe.sqe_opc = NVME_OPC_SET_FEATURES;
2106 2106 cmd->nc_sqe.sqe_cdw10 = feature;
2107 2107 cmd->nc_sqe.sqe_cdw11 = val;
2108 2108
2109 2109 if (user)
2110 2110 cmd->nc_dontpanic = B_TRUE;
2111 2111
2112 2112 switch (feature) {
2113 2113 case NVME_FEAT_WRITE_CACHE:
2114 2114 if (!nvme->n_write_cache_present)
2115 2115 goto fail;
2116 2116 break;
2117 2117
2118 2118 case NVME_FEAT_NQUEUES:
2119 2119 break;
2120 2120
2121 2121 default:
2122 2122 goto fail;
2123 2123 }
2124 2124
2125 2125 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2126 2126
2127 2127 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2128 2128 dev_err(nvme->n_dip, CE_WARN,
2129 2129 "!SET FEATURES %d failed with sct = %x, sc = %x",
2130 2130 feature, cmd->nc_cqe.cqe_sf.sf_sct,
2131 2131 cmd->nc_cqe.cqe_sf.sf_sc);
2132 2132 goto fail;
2133 2133 }
2134 2134
2135 2135 *res = cmd->nc_cqe.cqe_dw0;
2136 2136
2137 2137 fail:
2138 2138 nvme_free_cmd(cmd);
2139 2139 return (ret);
2140 2140 }
2141 2141
2142 2142 static int
2143 2143 nvme_get_features(nvme_t *nvme, boolean_t user, uint32_t nsid, uint8_t feature,
2144 2144 uint32_t *res, void **buf, size_t *bufsize)
2145 2145 {
2146 2146 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2147 2147 int ret = EINVAL;
2148 2148
2149 2149 ASSERT(res != NULL);
2150 2150
2151 2151 if (bufsize != NULL)
2152 2152 *bufsize = 0;
2153 2153
2154 2154 cmd->nc_sqid = 0;
2155 2155 cmd->nc_callback = nvme_wakeup_cmd;
2156 2156 cmd->nc_sqe.sqe_opc = NVME_OPC_GET_FEATURES;
2157 2157 cmd->nc_sqe.sqe_cdw10 = feature;
2158 2158 cmd->nc_sqe.sqe_cdw11 = *res;
2159 2159
2160 2160 /*
2161 2161 	 * For some of the optional features there doesn't seem to be a method
2162 2162 	 * of detecting whether they are supported other than using them.  This
2163 2163 	 * will cause an "Invalid Field in Command" error, which is normally
2164 2164 	 * considered a programming error.  Set the nc_dontpanic flag to
2165 2165 	 * override the panic in nvme_check_generic_cmd_status().
2166 2166 */
2167 2167 switch (feature) {
2168 2168 case NVME_FEAT_ARBITRATION:
2169 2169 case NVME_FEAT_POWER_MGMT:
2170 2170 case NVME_FEAT_TEMPERATURE:
2171 2171 case NVME_FEAT_ERROR:
2172 2172 case NVME_FEAT_NQUEUES:
2173 2173 case NVME_FEAT_INTR_COAL:
2174 2174 case NVME_FEAT_INTR_VECT:
2175 2175 case NVME_FEAT_WRITE_ATOM:
2176 2176 case NVME_FEAT_ASYNC_EVENT:
2177 2177 break;
2178 2178
2179 2179 case NVME_FEAT_WRITE_CACHE:
2180 2180 if (!nvme->n_write_cache_present)
2181 2181 goto fail;
2182 2182 break;
2183 2183
2184 2184 case NVME_FEAT_LBA_RANGE:
2185 2185 if (!nvme->n_lba_range_supported)
2186 2186 goto fail;
2187 2187
2188 2188 cmd->nc_dontpanic = B_TRUE;
2189 2189 cmd->nc_sqe.sqe_nsid = nsid;
2190 2190 ASSERT(bufsize != NULL);
2191 2191 *bufsize = NVME_LBA_RANGE_BUFSIZE;
2192 2192 break;
2193 2193
2194 2194 case NVME_FEAT_AUTO_PST:
2195 2195 if (!nvme->n_auto_pst_supported)
2196 2196 goto fail;
2197 2197
2198 2198 ASSERT(bufsize != NULL);
2199 2199 *bufsize = NVME_AUTO_PST_BUFSIZE;
2200 2200 break;
2201 2201
2202 2202 case NVME_FEAT_PROGRESS:
2203 2203 if (!nvme->n_progress_supported)
2204 2204 goto fail;
2205 2205
2206 2206 cmd->nc_dontpanic = B_TRUE;
2207 2207 break;
2208 2208
2209 2209 default:
2210 2210 goto fail;
2211 2211 }
2212 2212
2213 2213 if (user)
2214 2214 cmd->nc_dontpanic = B_TRUE;
2215 2215
2216 2216 if (bufsize != NULL && *bufsize != 0) {
2217 2217 if (nvme_zalloc_dma(nvme, *bufsize, DDI_DMA_READ,
2218 2218 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
2219 2219 dev_err(nvme->n_dip, CE_WARN,
2220 2220 "!nvme_zalloc_dma failed for GET FEATURES");
2221 2221 ret = ENOMEM;
2222 2222 goto fail;
2223 2223 }
2224 2224
2225 2225 if (cmd->nc_dma->nd_ncookie > 2) {
2226 2226 dev_err(nvme->n_dip, CE_WARN,
2227 2227 "!too many DMA cookies for GET FEATURES");
2228 2228 atomic_inc_32(&nvme->n_too_many_cookies);
2229 2229 ret = ENOMEM;
2230 2230 goto fail;
2231 2231 }
2232 2232
2233 2233 cmd->nc_sqe.sqe_dptr.d_prp[0] =
2234 2234 cmd->nc_dma->nd_cookie.dmac_laddress;
2235 2235 if (cmd->nc_dma->nd_ncookie > 1) {
2236 2236 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
2237 2237 &cmd->nc_dma->nd_cookie);
2238 2238 cmd->nc_sqe.sqe_dptr.d_prp[1] =
2239 2239 cmd->nc_dma->nd_cookie.dmac_laddress;
2240 2240 }
2241 2241 }
2242 2242
2243 2243 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2244 2244
2245 2245 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2246 2246 boolean_t known = B_TRUE;
2247 2247
2248 2248 		/* Check if this is an unsupported optional feature */
2249 2249 if (cmd->nc_cqe.cqe_sf.sf_sct == NVME_CQE_SCT_GENERIC &&
2250 2250 cmd->nc_cqe.cqe_sf.sf_sc == NVME_CQE_SC_GEN_INV_FLD) {
2251 2251 switch (feature) {
2252 2252 case NVME_FEAT_LBA_RANGE:
2253 2253 nvme->n_lba_range_supported = B_FALSE;
2254 2254 break;
2255 2255 case NVME_FEAT_PROGRESS:
2256 2256 nvme->n_progress_supported = B_FALSE;
2257 2257 break;
2258 2258 default:
2259 2259 known = B_FALSE;
2260 2260 break;
2261 2261 }
2262 2262 } else {
2263 2263 known = B_FALSE;
2264 2264 }
2265 2265
2266 2266 /* Report the error otherwise */
2267 2267 if (!known) {
2268 2268 dev_err(nvme->n_dip, CE_WARN,
2269 2269 "!GET FEATURES %d failed with sct = %x, sc = %x",
2270 2270 feature, cmd->nc_cqe.cqe_sf.sf_sct,
2271 2271 cmd->nc_cqe.cqe_sf.sf_sc);
2272 2272 }
2273 2273
2274 2274 goto fail;
2275 2275 }
2276 2276
2277 2277 if (bufsize != NULL && *bufsize != 0) {
2278 2278 ASSERT(buf != NULL);
2279 2279 *buf = kmem_alloc(*bufsize, KM_SLEEP);
2280 2280 bcopy(cmd->nc_dma->nd_memp, *buf, *bufsize);
2281 2281 }
2282 2282
2283 2283 *res = cmd->nc_cqe.cqe_dw0;
2284 2284
2285 2285 fail:
2286 2286 nvme_free_cmd(cmd);
2287 2287 return (ret);
2288 2288 }
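/*
 * Usage sketch of the probe-by-use pattern described above, e.g. for
 * the Software Progress Marker feature:
 *
 *	uint32_t res = 0;
 *	if (nvme_get_features(nvme, B_FALSE, 0, NVME_FEAT_PROGRESS,
 *	    &res, NULL, NULL) == 0)
 *		-- supported; res holds completion dword 0
 *
 * On "Invalid Field in Command" n_progress_supported is cleared, so
 * later callers can skip the feature without another round trip.
 */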
2289 2289
2290 2290 static int
2291 2291 nvme_write_cache_set(nvme_t *nvme, boolean_t enable)
2292 2292 {
2293 2293 nvme_write_cache_t nwc = { 0 };
2294 2294
2295 2295 if (enable)
2296 2296 nwc.b.wc_wce = 1;
2297 2297
2298 2298 return (nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_WRITE_CACHE,
2299 2299 nwc.r, &nwc.r));
2300 2300 }
2301 2301
2302 2302 static int
2303 2303 nvme_set_nqueues(nvme_t *nvme)
2304 2304 {
2305 2305 nvme_nqueues_t nq = { 0 };
2306 2306 int ret;
2307 2307
2308 2308 /*
2309 2309 * The default is to allocate one completion queue per vector.
2310 2310 */
2311 2311 if (nvme->n_completion_queues == -1)
2312 2312 nvme->n_completion_queues = nvme->n_intr_cnt;
2313 2313
2314 2314 /*
2315 2315 	 * There is no point in having more completion queues than
2316 2316 * interrupt vectors.
2317 2317 */
2318 2318 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2319 2319 nvme->n_intr_cnt);
2320 2320
2321 2321 /*
2322 2322 * The default is to use one submission queue per completion queue.
2323 2323 */
2324 2324 if (nvme->n_submission_queues == -1)
2325 2325 nvme->n_submission_queues = nvme->n_completion_queues;
2326 2326
2327 2327 /*
2328 2328 	 * There is no point in having more completion queues than
2329 2329 * submission queues.
2330 2330 */
2331 2331 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2332 2332 nvme->n_submission_queues);
2333 2333
2334 2334 ASSERT(nvme->n_submission_queues > 0);
2335 2335 ASSERT(nvme->n_completion_queues > 0);
2336 2336
2337 2337 nq.b.nq_nsq = nvme->n_submission_queues - 1;
2338 2338 nq.b.nq_ncq = nvme->n_completion_queues - 1;
2339 2339
2340 2340 ret = nvme_set_features(nvme, B_FALSE, 0, NVME_FEAT_NQUEUES, nq.r,
2341 2341 &nq.r);
2342 2342
2343 2343 if (ret == 0) {
2344 2344 /*
2345 2345 * Never use more than the requested number of queues.
2346 2346 */
2347 2347 nvme->n_submission_queues = MIN(nvme->n_submission_queues,
2348 2348 nq.b.nq_nsq + 1);
2349 2349 nvme->n_completion_queues = MIN(nvme->n_completion_queues,
2350 2350 nq.b.nq_ncq + 1);
2351 2351 }
2352 2352
2353 2353 return (ret);
2354 2354 }
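/*
 * Worked example: both queue counts in the Number of Queues feature are
 * zero's based, so requesting 8 submission and 8 completion queues sets
 * cdw11 = 0x00070007. If the controller grants only 4 of each it
 * returns 0x00030003 in completion dword 0, and the MIN() calls above
 * clamp n_submission_queues and n_completion_queues to 4.
 */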
2355 2355
2356 2356 static int
2357 2357 nvme_create_completion_queue(nvme_t *nvme, nvme_cq_t *cq)
2358 2358 {
2359 2359 nvme_cmd_t *cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2360 2360 nvme_create_queue_dw10_t dw10 = { 0 };
2361 2361 nvme_create_cq_dw11_t c_dw11 = { 0 };
2362 2362 int ret;
2363 2363
2364 2364 dw10.b.q_qid = cq->ncq_id;
2365 2365 dw10.b.q_qsize = cq->ncq_nentry - 1;
2366 2366
2367 2367 c_dw11.b.cq_pc = 1;
2368 2368 c_dw11.b.cq_ien = 1;
2369 2369 c_dw11.b.cq_iv = cq->ncq_id % nvme->n_intr_cnt;
2370 2370
2371 2371 cmd->nc_sqid = 0;
2372 2372 cmd->nc_callback = nvme_wakeup_cmd;
2373 2373 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_CQUEUE;
2374 2374 cmd->nc_sqe.sqe_cdw10 = dw10.r;
2375 2375 cmd->nc_sqe.sqe_cdw11 = c_dw11.r;
2376 2376 cmd->nc_sqe.sqe_dptr.d_prp[0] = cq->ncq_dma->nd_cookie.dmac_laddress;
2377 2377
2378 2378 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2379 2379
2380 2380 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2381 2381 dev_err(nvme->n_dip, CE_WARN,
2382 2382 "!CREATE CQUEUE failed with sct = %x, sc = %x",
2383 2383 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2384 2384 }
2385 2385
2386 2386 nvme_free_cmd(cmd);
2387 2387
2388 2388 return (ret);
2389 2389 }
2390 2390
2391 2391 static int
2392 2392 nvme_create_io_qpair(nvme_t *nvme, nvme_qpair_t *qp, uint16_t idx)
2393 2393 {
2394 2394 nvme_cq_t *cq = qp->nq_cq;
2395 2395 nvme_cmd_t *cmd;
2396 2396 nvme_create_queue_dw10_t dw10 = { 0 };
2397 2397 nvme_create_sq_dw11_t s_dw11 = { 0 };
2398 2398 int ret;
2399 2399
2400 2400 /*
2401 2401 * It is possible to have more qpairs than completion queues,
2402 2402 	 * and when idx > ncq_id, that completion queue is shared
2403 2403 * and has already been created.
2404 2404 */
2405 2405 if (idx <= cq->ncq_id &&
2406 2406 nvme_create_completion_queue(nvme, cq) != DDI_SUCCESS)
2407 2407 return (DDI_FAILURE);
2408 2408
2409 2409 dw10.b.q_qid = idx;
2410 2410 dw10.b.q_qsize = qp->nq_nentry - 1;
2411 2411
2412 2412 s_dw11.b.sq_pc = 1;
2413 2413 s_dw11.b.sq_cqid = cq->ncq_id;
2414 2414
2415 2415 cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
2416 2416 cmd->nc_sqid = 0;
2417 2417 cmd->nc_callback = nvme_wakeup_cmd;
2418 2418 cmd->nc_sqe.sqe_opc = NVME_OPC_CREATE_SQUEUE;
2419 2419 cmd->nc_sqe.sqe_cdw10 = dw10.r;
2420 2420 cmd->nc_sqe.sqe_cdw11 = s_dw11.r;
2421 2421 cmd->nc_sqe.sqe_dptr.d_prp[0] = qp->nq_sqdma->nd_cookie.dmac_laddress;
2422 2422
2423 2423 nvme_admin_cmd(cmd, nvme_admin_cmd_timeout);
2424 2424
2425 2425 if ((ret = nvme_check_cmd_status(cmd)) != 0) {
2426 2426 dev_err(nvme->n_dip, CE_WARN,
2427 2427 "!CREATE SQUEUE failed with sct = %x, sc = %x",
2428 2428 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
2429 2429 }
2430 2430
2431 2431 nvme_free_cmd(cmd);
2432 2432
2433 2433 return (ret);
2434 2434 }
2435 2435
2436 2436 static boolean_t
2437 2437 nvme_reset(nvme_t *nvme, boolean_t quiesce)
2438 2438 {
2439 2439 nvme_reg_csts_t csts;
2440 2440 int i;
2441 2441
2442 2442 nvme_put32(nvme, NVME_REG_CC, 0);
2443 2443
2444 2444 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2445 2445 if (csts.b.csts_rdy == 1) {
2446 2446 nvme_put32(nvme, NVME_REG_CC, 0);
2447 2447 for (i = 0; i != nvme->n_timeout * 10; i++) {
2448 2448 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2449 2449 if (csts.b.csts_rdy == 0)
2450 2450 break;
2451 2451
2452 2452 if (quiesce)
2453 2453 drv_usecwait(50000);
2454 2454 else
2455 2455 delay(drv_usectohz(50000));
2456 2456 }
2457 2457 }
2458 2458
2459 2459 nvme_put32(nvme, NVME_REG_AQA, 0);
2460 2460 nvme_put32(nvme, NVME_REG_ASQ, 0);
2461 2461 nvme_put32(nvme, NVME_REG_ACQ, 0);
2462 2462
2463 2463 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2464 2464 return (csts.b.csts_rdy == 0 ? B_TRUE : B_FALSE);
2465 2465 }
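/*
 * Note on the loop bound above: CAP.TO is expressed in 500 ms units, so
 * polling n_timeout * 10 times with a 50 ms delay waits exactly the
 * advertised worst-case time; e.g. a TO value of 20 gives 200 polls,
 * i.e. 10 seconds.
 */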
2466 2466
2467 2467 static void
2468 2468 nvme_shutdown(nvme_t *nvme, int mode, boolean_t quiesce)
2469 2469 {
2470 2470 nvme_reg_cc_t cc;
2471 2471 nvme_reg_csts_t csts;
2472 2472 int i;
2473 2473
2474 2474 ASSERT(mode == NVME_CC_SHN_NORMAL || mode == NVME_CC_SHN_ABRUPT);
2475 2475
2476 2476 cc.r = nvme_get32(nvme, NVME_REG_CC);
2477 2477 cc.b.cc_shn = mode & 0x3;
2478 2478 nvme_put32(nvme, NVME_REG_CC, cc.r);
2479 2479
2480 2480 for (i = 0; i != 10; i++) {
2481 2481 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2482 2482 if (csts.b.csts_shst == NVME_CSTS_SHN_COMPLETE)
2483 2483 break;
2484 2484
2485 2485 if (quiesce)
2486 2486 drv_usecwait(100000);
2487 2487 else
2488 2488 delay(drv_usectohz(100000));
2489 2489 }
2490 2490 }
2491 2491
2492 2492
2493 2493 static void
2494 2494 nvme_prepare_devid(nvme_t *nvme, uint32_t nsid)
2495 2495 {
2496 2496 /*
2497 2497 * Section 7.7 of the spec describes how to get a unique ID for
2498 2498 * the controller: the vendor ID, the model name and the serial
2499 2499 * number shall be unique when combined.
2500 2500 *
2501 2501 * If a namespace has no EUI64 we use the above and add the hex
2502 2502 * namespace ID to get a unique ID for the namespace.
2503 2503 */
2504 2504 char model[sizeof (nvme->n_idctl->id_model) + 1];
2505 2505 char serial[sizeof (nvme->n_idctl->id_serial) + 1];
2506 2506
2507 2507 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2508 2508 bcopy(nvme->n_idctl->id_serial, serial,
2509 2509 sizeof (nvme->n_idctl->id_serial));
2510 2510
2511 2511 model[sizeof (nvme->n_idctl->id_model)] = '\0';
2512 2512 serial[sizeof (nvme->n_idctl->id_serial)] = '\0';
2513 2513
2514 2514 nvme->n_ns[nsid - 1].ns_devid = kmem_asprintf("%4X-%s-%s-%X",
2515 2515 nvme->n_idctl->id_vid, model, serial, nsid);
2516 2516 }
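/*
 * Example with hypothetical values: a controller with vendor ID 0x8086,
 * model "INTEL SSDPEDMD800G4" and serial "CVFT0123456789" would yield
 * the devid "8086-INTEL SSDPEDMD800G4-CVFT0123456789-1" for
 * namespace 1.
 */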
2517 2517
2518 2518 static int
2519 2519 nvme_init_ns(nvme_t *nvme, int nsid)
2520 2520 {
2521 2521 nvme_namespace_t *ns = &nvme->n_ns[nsid - 1];
2522 2522 nvme_identify_nsid_t *idns;
2523 2523 boolean_t was_ignored;
2524 2524 int last_rp;
2525 2525
2526 2526 ns->ns_nvme = nvme;
2527 2527
2528 2528 if (nvme_identify(nvme, B_FALSE, nsid, (void **)&idns) != 0) {
2529 2529 dev_err(nvme->n_dip, CE_WARN,
2530 2530 "!failed to identify namespace %d", nsid);
2531 2531 return (DDI_FAILURE);
2532 2532 }
2533 2533
2534 2534 ns->ns_idns = idns;
2535 2535 ns->ns_id = nsid;
2536 2536 ns->ns_block_count = idns->id_nsize;
2537 2537 ns->ns_block_size =
2538 2538 1 << idns->id_lbaf[idns->id_flbas.lba_format].lbaf_lbads;
2539 2539 ns->ns_best_block_size = ns->ns_block_size;
2540 2540
2541 2541 /*
2542 2542 * Get the EUI64 if present. Use it for devid and device node names.
2543 2543 */
2544 2544 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
2545 2545 bcopy(idns->id_eui64, ns->ns_eui64, sizeof (ns->ns_eui64));
2546 2546
2547 2547 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
2548 2548 if (*(uint64_t *)ns->ns_eui64 != 0) {
2549 2549 uint8_t *eui64 = ns->ns_eui64;
2550 2550
2551 2551 (void) snprintf(ns->ns_name, sizeof (ns->ns_name),
2552 2552 "%02x%02x%02x%02x%02x%02x%02x%02x",
2553 2553 eui64[0], eui64[1], eui64[2], eui64[3],
2554 2554 eui64[4], eui64[5], eui64[6], eui64[7]);
2555 2555 } else {
2556 2556 (void) snprintf(ns->ns_name, sizeof (ns->ns_name), "%d",
2557 2557 ns->ns_id);
2558 2558
2559 2559 nvme_prepare_devid(nvme, ns->ns_id);
2560 2560 }
2561 2561
2562 2562 /*
2563 2563 * Find the LBA format with no metadata and the best relative
2564 2564 	 * performance. A value of 3 means "degraded"; 0 is best.
2565 2565 */
2566 2566 last_rp = 3;
2567 2567 for (int j = 0; j <= idns->id_nlbaf; j++) {
2568 2568 if (idns->id_lbaf[j].lbaf_lbads == 0)
2569 2569 break;
2570 2570 if (idns->id_lbaf[j].lbaf_ms != 0)
2571 2571 continue;
2572 2572 if (idns->id_lbaf[j].lbaf_rp >= last_rp)
2573 2573 continue;
2574 2574 last_rp = idns->id_lbaf[j].lbaf_rp;
2575 2575 ns->ns_best_block_size =
2576 2576 1 << idns->id_lbaf[j].lbaf_lbads;
2577 2577 }
2578 2578
2579 2579 if (ns->ns_best_block_size < nvme->n_min_block_size)
2580 2580 ns->ns_best_block_size = nvme->n_min_block_size;
2581 2581
2582 2582 was_ignored = ns->ns_ignore;
2583 2583
2584 2584 /*
2585 2585 * We currently don't support namespaces that use either:
2586 2586 * - protection information
2587 2587 * - illegal block size (< 512)
2588 2588 */
2589 2589 if (idns->id_dps.dp_pinfo) {
2590 2590 dev_err(nvme->n_dip, CE_WARN,
2591 2591 "!ignoring namespace %d, unsupported feature: "
2592 2592 "pinfo = %d", nsid, idns->id_dps.dp_pinfo);
2593 2593 ns->ns_ignore = B_TRUE;
2594 2594 } else if (ns->ns_block_size < 512) {
2595 2595 dev_err(nvme->n_dip, CE_WARN,
2596 2596 "!ignoring namespace %d, unsupported block size %"PRIu64,
2597 2597 nsid, (uint64_t)ns->ns_block_size);
2598 2598 ns->ns_ignore = B_TRUE;
2599 2599 } else {
2600 2600 ns->ns_ignore = B_FALSE;
2601 2601 }
2602 2602
2603 2603 /*
2604 2604 * Keep a count of namespaces which are attachable.
2605 2605 * See comments in nvme_bd_driveinfo() to understand its effect.
2606 2606 */
2607 2607 if (was_ignored) {
2608 2608 /*
2609 2609 * Previously ignored, but now not. Count it.
2610 2610 */
2611 2611 if (!ns->ns_ignore)
2612 2612 nvme->n_namespaces_attachable++;
2613 2613 } else {
2614 2614 /*
2615 2615 * Wasn't ignored previously, but now needs to be.
2616 2616 * Discount it.
2617 2617 */
2618 2618 if (ns->ns_ignore)
2619 2619 nvme->n_namespaces_attachable--;
2620 2620 }
2621 2621
2622 2622 return (DDI_SUCCESS);
2623 2623 }
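/*
 * Worked example for the LBA format selection above: given
 * lbaf[0] = { lbads = 9, ms = 0, rp = 2 } and
 * lbaf[1] = { lbads = 12, ms = 0, rp = 0 }, the loop prefers lbaf[1]
 * and sets ns_best_block_size = 1 << 12 = 4096, even if the namespace
 * is currently formatted with 512-byte blocks.
 */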
2624 2624
2625 2625 static int
2626 2626 nvme_init(nvme_t *nvme)
2627 2627 {
2628 2628 nvme_reg_cc_t cc = { 0 };
2629 2629 nvme_reg_aqa_t aqa = { 0 };
2630 2630 nvme_reg_asq_t asq = { 0 };
2631 2631 nvme_reg_acq_t acq = { 0 };
2632 2632 nvme_reg_cap_t cap;
2633 2633 nvme_reg_vs_t vs;
2634 2634 nvme_reg_csts_t csts;
2635 2635 int i = 0;
2636 2636 uint16_t nqueues;
2637 2637 uint_t tq_threads;
2638 2638 char model[sizeof (nvme->n_idctl->id_model) + 1];
2639 2639 char *vendor, *product;
2640 2640
2641 2641 /* Check controller version */
2642 2642 vs.r = nvme_get32(nvme, NVME_REG_VS);
2643 2643 nvme->n_version.v_major = vs.b.vs_mjr;
2644 2644 nvme->n_version.v_minor = vs.b.vs_mnr;
2645 2645 dev_err(nvme->n_dip, CE_CONT, "?NVMe spec version %d.%d",
2646 2646 nvme->n_version.v_major, nvme->n_version.v_minor);
2647 2647
2648 2648 if (nvme->n_version.v_major > nvme_version_major) {
2649 2649 dev_err(nvme->n_dip, CE_WARN, "!no support for version > %d.x",
2650 2650 nvme_version_major);
2651 2651 if (nvme->n_strict_version)
2652 2652 goto fail;
2653 2653 }
2654 2654
2655 2655 	/* retrieve controller capabilities */
2656 2656 cap.r = nvme_get64(nvme, NVME_REG_CAP);
2657 2657
2658 2658 if ((cap.b.cap_css & NVME_CAP_CSS_NVM) == 0) {
2659 2659 dev_err(nvme->n_dip, CE_WARN,
2660 2660 "!NVM command set not supported by hardware");
2661 2661 goto fail;
2662 2662 }
2663 2663
2664 2664 nvme->n_nssr_supported = cap.b.cap_nssrs;
2665 2665 nvme->n_doorbell_stride = 4 << cap.b.cap_dstrd;
2666 2666 nvme->n_timeout = cap.b.cap_to;
2667 2667 nvme->n_arbitration_mechanisms = cap.b.cap_ams;
2668 2668 nvme->n_cont_queues_reqd = cap.b.cap_cqr;
2669 2669 nvme->n_max_queue_entries = cap.b.cap_mqes + 1;
2670 2670
2671 2671 /*
2672 2672 * The MPSMIN and MPSMAX fields in the CAP register use 0 to specify
2673 2673 * the base page size of 4k (1<<12), so add 12 here to get the real
2674 2674 * page size value.
2675 2675 */
2676 2676 nvme->n_pageshift = MIN(MAX(cap.b.cap_mpsmin + 12, PAGESHIFT),
2677 2677 cap.b.cap_mpsmax + 12);
2678 2678 nvme->n_pagesize = 1UL << (nvme->n_pageshift);
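	/*
	 * E.g. cap_mpsmin = 0 and cap_mpsmax = 0 (4k pages only) on an
	 * x86 system with PAGESHIFT = 12 yields n_pageshift = 12 and
	 * n_pagesize = 4096.
	 */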
2679 2679
2680 2680 /*
2681 2681 * Set up Queue DMA to transfer at least 1 page-aligned page at a time.
2682 2682 */
2683 2683 nvme->n_queue_dma_attr.dma_attr_align = nvme->n_pagesize;
2684 2684 nvme->n_queue_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
2685 2685
2686 2686 /*
2687 2687 * Set up PRP DMA to transfer 1 page-aligned page at a time.
2688 2688 	 * Maxxfer may be increased once we have identified the controller limits.
2689 2689 */
2690 2690 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_pagesize;
2691 2691 nvme->n_prp_dma_attr.dma_attr_minxfer = nvme->n_pagesize;
2692 2692 nvme->n_prp_dma_attr.dma_attr_align = nvme->n_pagesize;
2693 2693 nvme->n_prp_dma_attr.dma_attr_seg = nvme->n_pagesize - 1;
2694 2694
2695 2695 /*
2696 2696 * Reset controller if it's still in ready state.
2697 2697 */
2698 2698 if (nvme_reset(nvme, B_FALSE) == B_FALSE) {
2699 2699 dev_err(nvme->n_dip, CE_WARN, "!unable to reset controller");
2700 2700 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
2701 2701 nvme->n_dead = B_TRUE;
2702 2702 goto fail;
2703 2703 }
2704 2704
2705 2705 /*
2706 2706 * Create the cq array with one completion queue to be assigned
2707 2707 * to the admin queue pair and a limited number of taskqs (4).
2708 2708 */
2709 2709 if (nvme_create_cq_array(nvme, 1, nvme->n_admin_queue_len, 4) !=
2710 2710 DDI_SUCCESS) {
2711 2711 dev_err(nvme->n_dip, CE_WARN,
2712 2712 "!failed to pre-allocate admin completion queue");
2713 2713 goto fail;
2714 2714 }
2715 2715 /*
2716 2716 * Create the admin queue pair.
2717 2717 */
2718 2718 if (nvme_alloc_qpair(nvme, nvme->n_admin_queue_len, &nvme->n_adminq, 0)
2719 2719 != DDI_SUCCESS) {
2720 2720 dev_err(nvme->n_dip, CE_WARN,
2721 2721 "!unable to allocate admin qpair");
2722 2722 goto fail;
2723 2723 }
2724 2724 nvme->n_ioq = kmem_alloc(sizeof (nvme_qpair_t *), KM_SLEEP);
2725 2725 nvme->n_ioq[0] = nvme->n_adminq;
2726 2726
2727 2727 nvme->n_progress |= NVME_ADMIN_QUEUE;
2728 2728
2729 2729 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2730 2730 "admin-queue-len", nvme->n_admin_queue_len);
2731 2731
2732 2732 aqa.b.aqa_asqs = aqa.b.aqa_acqs = nvme->n_admin_queue_len - 1;
2733 2733 asq = nvme->n_adminq->nq_sqdma->nd_cookie.dmac_laddress;
2734 2734 acq = nvme->n_adminq->nq_cq->ncq_dma->nd_cookie.dmac_laddress;
2735 2735
2736 2736 ASSERT((asq & (nvme->n_pagesize - 1)) == 0);
2737 2737 ASSERT((acq & (nvme->n_pagesize - 1)) == 0);
2738 2738
2739 2739 nvme_put32(nvme, NVME_REG_AQA, aqa.r);
2740 2740 nvme_put64(nvme, NVME_REG_ASQ, asq);
2741 2741 nvme_put64(nvme, NVME_REG_ACQ, acq);
2742 2742
2743 2743 cc.b.cc_ams = 0; /* use Round-Robin arbitration */
2744 2744 cc.b.cc_css = 0; /* use NVM command set */
2745 2745 cc.b.cc_mps = nvme->n_pageshift - 12;
2746 2746 cc.b.cc_shn = 0; /* no shutdown in progress */
2747 2747 cc.b.cc_en = 1; /* enable controller */
2748 2748 cc.b.cc_iosqes = 6; /* submission queue entry is 2^6 bytes long */
2749 2749 cc.b.cc_iocqes = 4; /* completion queue entry is 2^4 bytes long */
2750 2750
2751 2751 nvme_put32(nvme, NVME_REG_CC, cc.r);
2752 2752
2753 2753 /*
2754 2754 * Wait for the controller to become ready.
2755 2755 */
2756 2756 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2757 2757 if (csts.b.csts_rdy == 0) {
2758 2758 for (i = 0; i != nvme->n_timeout * 10; i++) {
2759 2759 delay(drv_usectohz(50000));
2760 2760 csts.r = nvme_get32(nvme, NVME_REG_CSTS);
2761 2761
2762 2762 if (csts.b.csts_cfs == 1) {
2763 2763 dev_err(nvme->n_dip, CE_WARN,
2764 2764 "!controller fatal status at init");
2765 2765 ddi_fm_service_impact(nvme->n_dip,
2766 2766 DDI_SERVICE_LOST);
2767 2767 nvme->n_dead = B_TRUE;
2768 2768 goto fail;
2769 2769 }
2770 2770
2771 2771 if (csts.b.csts_rdy == 1)
2772 2772 break;
2773 2773 }
2774 2774 }
2775 2775
2776 2776 if (csts.b.csts_rdy == 0) {
2777 2777 dev_err(nvme->n_dip, CE_WARN, "!controller not ready");
2778 2778 ddi_fm_service_impact(nvme->n_dip, DDI_SERVICE_LOST);
2779 2779 nvme->n_dead = B_TRUE;
2780 2780 goto fail;
2781 2781 }
2782 2782
2783 2783 /*
2784 2784 * Assume an abort command limit of 1. We'll destroy and re-init
2785 2785 * that later when we know the true abort command limit.
2786 2786 */
2787 2787 sema_init(&nvme->n_abort_sema, 1, NULL, SEMA_DRIVER, NULL);
2788 2788
2789 2789 /*
2790 2790 * Setup initial interrupt for admin queue.
2791 2791 */
2792 2792 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX, 1)
2793 2793 != DDI_SUCCESS) &&
2794 2794 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI, 1)
2795 2795 != DDI_SUCCESS) &&
2796 2796 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_FIXED, 1)
2797 2797 != DDI_SUCCESS)) {
2798 2798 dev_err(nvme->n_dip, CE_WARN,
2799 2799 "!failed to setup initial interrupt");
2800 2800 goto fail;
2801 2801 }
2802 2802
2803 2803 /*
2804 2804 * Post an asynchronous event command to catch errors.
2805 2805 * We assume the asynchronous events are supported as required by
2806 2806 * specification (Figure 40 in section 5 of NVMe 1.2).
2807 2807 * However, since at least qemu does not follow the specification,
2808 2808 * we need a mechanism to protect ourselves.
2809 2809 */
2810 2810 nvme->n_async_event_supported = B_TRUE;
2811 2811 nvme_async_event(nvme);
2812 2812
2813 2813 /*
2814 2814 * Identify Controller
2815 2815 */
2816 2816 if (nvme_identify(nvme, B_FALSE, 0, (void **)&nvme->n_idctl) != 0) {
2817 2817 dev_err(nvme->n_dip, CE_WARN,
2818 2818 "!failed to identify controller");
2819 2819 goto fail;
2820 2820 }
2821 2821
2822 2822 /*
2823 2823 * Get Vendor & Product ID
2824 2824 */
2825 2825 bcopy(nvme->n_idctl->id_model, model, sizeof (nvme->n_idctl->id_model));
2826 2826 model[sizeof (nvme->n_idctl->id_model)] = '\0';
2827 2827 sata_split_model(model, &vendor, &product);
2828 2828
2829 2829 if (vendor == NULL)
2830 2830 nvme->n_vendor = strdup("NVMe");
2831 2831 else
2832 2832 nvme->n_vendor = strdup(vendor);
2833 2833
2834 2834 nvme->n_product = strdup(product);
2835 2835
2836 2836 /*
2837 2837 * Get controller limits.
2838 2838 */
2839 2839 nvme->n_async_event_limit = MAX(NVME_MIN_ASYNC_EVENT_LIMIT,
2840 2840 MIN(nvme->n_admin_queue_len / 10,
2841 2841 MIN(nvme->n_idctl->id_aerl + 1, nvme->n_async_event_limit)));
2842 2842
2843 2843 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2844 2844 "async-event-limit", nvme->n_async_event_limit);
2845 2845
2846 2846 nvme->n_abort_command_limit = nvme->n_idctl->id_acl + 1;
2847 2847
2848 2848 /*
2849 2849 * Reinitialize the semaphore with the true abort command limit
2850 2850 * supported by the hardware. It's not necessary to disable interrupts
2851 2851 * as only command aborts use the semaphore, and no commands are
2852 2852 * executed or aborted while we're here.
2853 2853 */
2854 2854 sema_destroy(&nvme->n_abort_sema);
2855 2855 sema_init(&nvme->n_abort_sema, nvme->n_abort_command_limit - 1, NULL,
2856 2856 SEMA_DRIVER, NULL);
2857 2857
2858 2858 nvme->n_progress |= NVME_CTRL_LIMITS;
2859 2859
2860 2860 if (nvme->n_idctl->id_mdts == 0)
2861 2861 nvme->n_max_data_transfer_size = nvme->n_pagesize * 65536;
2862 2862 else
2863 2863 nvme->n_max_data_transfer_size =
2864 2864 1ull << (nvme->n_pageshift + nvme->n_idctl->id_mdts);
2865 2865
2866 2866 nvme->n_error_log_len = nvme->n_idctl->id_elpe + 1;
2867 2867
2868 2868 /*
2869 2869 * Limit n_max_data_transfer_size to what we can handle in one PRP.
2870 2870 * Chained PRPs are currently unsupported.
2871 2871 *
2872 2872 * This is a no-op on hardware which doesn't support a transfer size
2873 2873 * big enough to require chained PRPs.
2874 2874 */
2875 2875 nvme->n_max_data_transfer_size = MIN(nvme->n_max_data_transfer_size,
2876 2876 (nvme->n_pagesize / sizeof (uint64_t) * nvme->n_pagesize));
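	/*
	 * E.g. with 4k pages a single PRP page holds 512 64-bit entries,
	 * capping transfers at 512 * 4096 bytes = 2 MB until chained
	 * PRPs are implemented.
	 */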
2877 2877
2878 2878 nvme->n_prp_dma_attr.dma_attr_maxxfer = nvme->n_max_data_transfer_size;
2879 2879
2880 2880 /*
2881 2881 * Make sure the minimum/maximum queue entry sizes are not
2882 2882 * larger/smaller than the default.
2883 2883 */
2884 2884
2885 2885 if (((1 << nvme->n_idctl->id_sqes.qes_min) > sizeof (nvme_sqe_t)) ||
2886 2886 ((1 << nvme->n_idctl->id_sqes.qes_max) < sizeof (nvme_sqe_t)) ||
2887 2887 ((1 << nvme->n_idctl->id_cqes.qes_min) > sizeof (nvme_cqe_t)) ||
2888 2888 ((1 << nvme->n_idctl->id_cqes.qes_max) < sizeof (nvme_cqe_t)))
2889 2889 goto fail;
2890 2890
2891 2891 /*
2892 2892 * Check for the presence of a Volatile Write Cache. If present,
2893 2893 * enable or disable based on the value of the property
2894 2894 * volatile-write-cache-enable (default is enabled).
2895 2895 */
2896 2896 nvme->n_write_cache_present =
2897 2897 nvme->n_idctl->id_vwc.vwc_present == 0 ? B_FALSE : B_TRUE;
2898 2898
2899 2899 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2900 2900 "volatile-write-cache-present",
2901 2901 nvme->n_write_cache_present ? 1 : 0);
2902 2902
2903 2903 if (!nvme->n_write_cache_present) {
2904 2904 nvme->n_write_cache_enabled = B_FALSE;
2905 2905 } else if (nvme_write_cache_set(nvme, nvme->n_write_cache_enabled)
2906 2906 != 0) {
2907 2907 dev_err(nvme->n_dip, CE_WARN,
2908 2908 "!failed to %sable volatile write cache",
2909 2909 nvme->n_write_cache_enabled ? "en" : "dis");
2910 2910 /*
2911 2911 * Assume the cache is (still) enabled.
2912 2912 */
2913 2913 nvme->n_write_cache_enabled = B_TRUE;
2914 2914 }
2915 2915
2916 2916 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip,
2917 2917 "volatile-write-cache-enable",
2918 2918 nvme->n_write_cache_enabled ? 1 : 0);
2919 2919
2920 2920 /*
2921 2921 * Assume LBA Range Type feature is supported. If it isn't this
2922 2922 * will be set to B_FALSE by nvme_get_features().
2923 2923 */
2924 2924 nvme->n_lba_range_supported = B_TRUE;
2925 2925
2926 2926 /*
2927 2927 * Check support for Autonomous Power State Transition.
2928 2928 */
2929 2929 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 1))
2930 2930 nvme->n_auto_pst_supported =
2931 2931 nvme->n_idctl->id_apsta.ap_sup == 0 ? B_FALSE : B_TRUE;
2932 2932
2933 2933 /*
2934 2934 * Assume Software Progress Marker feature is supported. If it isn't
2935 2935 * this will be set to B_FALSE by nvme_get_features().
2936 2936 */
2937 2937 nvme->n_progress_supported = B_TRUE;
2938 2938
2939 2939 /*
2940 2940 * Identify Namespaces
2941 2941 */
2942 2942 nvme->n_namespace_count = nvme->n_idctl->id_nn;
2943 2943
2944 2944 if (nvme->n_namespace_count == 0) {
2945 2945 dev_err(nvme->n_dip, CE_WARN,
2946 2946 "!controllers without namespaces are not supported");
2947 2947 goto fail;
2948 2948 }
2949 2949
2950 2950 if (nvme->n_namespace_count > NVME_MINOR_MAX) {
2951 2951 dev_err(nvme->n_dip, CE_WARN,
2952 2952 "!too many namespaces: %d, limiting to %d\n",
2953 2953 nvme->n_namespace_count, NVME_MINOR_MAX);
2954 2954 nvme->n_namespace_count = NVME_MINOR_MAX;
2955 2955 }
2956 2956
2957 2957 nvme->n_ns = kmem_zalloc(sizeof (nvme_namespace_t) *
2958 2958 nvme->n_namespace_count, KM_SLEEP);
2959 2959
2960 2960 for (i = 0; i != nvme->n_namespace_count; i++) {
2961 2961 mutex_init(&nvme->n_ns[i].ns_minor.nm_mutex, NULL, MUTEX_DRIVER,
2962 2962 NULL);
2963 2963 nvme->n_ns[i].ns_ignore = B_TRUE;
2964 2964 if (nvme_init_ns(nvme, i + 1) != DDI_SUCCESS)
2965 2965 goto fail;
2966 2966 }
2967 2967
2968 2968 /*
2969 2969 * Try to set up MSI/MSI-X interrupts.
2970 2970 */
2971 2971 if ((nvme->n_intr_types & (DDI_INTR_TYPE_MSI | DDI_INTR_TYPE_MSIX))
2972 2972 != 0) {
2973 2973 nvme_release_interrupts(nvme);
2974 2974
2975 2975 nqueues = MIN(UINT16_MAX, ncpus);
2976 2976
2977 2977 if ((nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSIX,
2978 2978 nqueues) != DDI_SUCCESS) &&
2979 2979 (nvme_setup_interrupts(nvme, DDI_INTR_TYPE_MSI,
2980 2980 nqueues) != DDI_SUCCESS)) {
2981 2981 dev_err(nvme->n_dip, CE_WARN,
2982 2982 "!failed to setup MSI/MSI-X interrupts");
2983 2983 goto fail;
2984 2984 }
2985 2985 }
2986 2986
2987 2987 /*
2988 2988 * Create I/O queue pairs.
2989 2989 */
2990 2990
2991 2991 if (nvme_set_nqueues(nvme) != 0) {
2992 2992 dev_err(nvme->n_dip, CE_WARN,
2993 2993 "!failed to set number of I/O queues to %d",
2994 2994 nvme->n_intr_cnt);
2995 2995 goto fail;
2996 2996 }
2997 2997
2998 2998 /*
2999 2999 * Reallocate I/O queue array
3000 3000 */
3001 3001 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *));
3002 3002 nvme->n_ioq = kmem_zalloc(sizeof (nvme_qpair_t *) *
3003 3003 (nvme->n_submission_queues + 1), KM_SLEEP);
3004 3004 nvme->n_ioq[0] = nvme->n_adminq;
3005 3005
3006 3006 /*
3007 3007 * There should always be at least as many submission queues
3008 3008 * as completion queues.
3009 3009 */
3010 3010 ASSERT(nvme->n_submission_queues >= nvme->n_completion_queues);
3011 3011
3012 3012 nvme->n_ioq_count = nvme->n_submission_queues;
3013 3013
3014 3014 nvme->n_io_squeue_len =
3015 3015 MIN(nvme->n_io_squeue_len, nvme->n_max_queue_entries);
3016 3016
3017 3017 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-squeue-len",
3018 3018 nvme->n_io_squeue_len);
3019 3019
3020 3020 /*
3021 3021 * Pre-allocate completion queues.
3022 3022 * When there are the same number of submission and completion
3023 3023 	 * queues, there is no value in having a larger completion
3024 3024 * queue length.
3025 3025 */
3026 3026 if (nvme->n_submission_queues == nvme->n_completion_queues)
3027 3027 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3028 3028 nvme->n_io_squeue_len);
3029 3029
3030 3030 nvme->n_io_cqueue_len = MIN(nvme->n_io_cqueue_len,
3031 3031 nvme->n_max_queue_entries);
3032 3032
3033 3033 (void) ddi_prop_update_int(DDI_DEV_T_NONE, nvme->n_dip, "io-cqueue-len",
3034 3034 nvme->n_io_cqueue_len);
3035 3035
3036 3036 /*
3037 3037 	 * Assign an equal number of taskq threads to each completion
3038 3038 * queue, capping the total number of threads to the number
3039 3039 * of CPUs.
3040 3040 */
3041 3041 tq_threads = MIN(UINT16_MAX, ncpus) / nvme->n_completion_queues;
3042 3042
3043 3043 /*
3044 3044 * In case the calculation above is zero, we need at least one
3045 3045 * thread per completion queue.
3046 3046 */
3047 3047 tq_threads = MAX(1, tq_threads);
3048 3048
3049 3049 if (nvme_create_cq_array(nvme, nvme->n_completion_queues + 1,
3050 3050 nvme->n_io_cqueue_len, tq_threads) != DDI_SUCCESS) {
3051 3051 dev_err(nvme->n_dip, CE_WARN,
3052 3052 "!failed to pre-allocate completion queues");
3053 3053 goto fail;
3054 3054 }
3055 3055
3056 3056 /*
3057 3057 	 * If we use fewer completion queues than interrupt vectors, return
3058 3058 	 * some of the interrupt vectors back to the system.
3059 3059 */
3060 3060 if (nvme->n_completion_queues + 1 < nvme->n_intr_cnt) {
3061 3061 nvme_release_interrupts(nvme);
3062 3062
3063 3063 if (nvme_setup_interrupts(nvme, nvme->n_intr_type,
3064 3064 nvme->n_completion_queues + 1) != DDI_SUCCESS) {
3065 3065 dev_err(nvme->n_dip, CE_WARN,
3066 3066 "!failed to reduce number of interrupts");
3067 3067 goto fail;
3068 3068 }
3069 3069 }
3070 3070
3071 3071 /*
3072 3072 * Alloc & register I/O queue pairs
3073 3073 */
3074 3074
3075 3075 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
3076 3076 if (nvme_alloc_qpair(nvme, nvme->n_io_squeue_len,
3077 3077 &nvme->n_ioq[i], i) != DDI_SUCCESS) {
3078 3078 dev_err(nvme->n_dip, CE_WARN,
3079 3079 "!unable to allocate I/O qpair %d", i);
3080 3080 goto fail;
3081 3081 }
3082 3082
3083 3083 if (nvme_create_io_qpair(nvme, nvme->n_ioq[i], i) != 0) {
3084 3084 dev_err(nvme->n_dip, CE_WARN,
3085 3085 "!unable to create I/O qpair %d", i);
3086 3086 goto fail;
3087 3087 }
3088 3088 }
3089 3089
3090 3090 /*
3091 3091 	 * Post more asynchronous event commands to reduce event reporting
3092 3092 * latency as suggested by the spec.
3093 3093 */
3094 3094 if (nvme->n_async_event_supported) {
3095 3095 for (i = 1; i != nvme->n_async_event_limit; i++)
3096 3096 nvme_async_event(nvme);
3097 3097 }
3098 3098
3099 3099 return (DDI_SUCCESS);
3100 3100
3101 3101 fail:
3102 3102 (void) nvme_reset(nvme, B_FALSE);
3103 3103 return (DDI_FAILURE);
3104 3104 }
3105 3105
3106 3106 static uint_t
3107 3107 nvme_intr(caddr_t arg1, caddr_t arg2)
3108 3108 {
3109 3109 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3110 3110 nvme_t *nvme = (nvme_t *)arg1;
3111 3111 int inum = (int)(uintptr_t)arg2;
3112 3112 int ccnt = 0;
3113 3113 int qnum;
3114 3114
3115 3115 if (inum >= nvme->n_intr_cnt)
3116 3116 return (DDI_INTR_UNCLAIMED);
3117 3117
3118 3118 if (nvme->n_dead)
3119 3119 return (nvme->n_intr_type == DDI_INTR_TYPE_FIXED ?
3120 3120 DDI_INTR_UNCLAIMED : DDI_INTR_CLAIMED);
3121 3121
3122 3122 /*
3123 3123 * The interrupt vector a queue uses is calculated as queue_idx %
3124 3124 * intr_cnt in nvme_create_io_qpair(). Iterate through the queue array
3125 3125 * in steps of n_intr_cnt to process all queues using this vector.
3126 3126 */
3127 3127 for (qnum = inum;
3128 3128 qnum < nvme->n_cq_count && nvme->n_cq[qnum] != NULL;
3129 3129 qnum += nvme->n_intr_cnt) {
3130 3130 ccnt += nvme_process_iocq(nvme, nvme->n_cq[qnum]);
3131 3131 }
3132 3132
3133 3133 return (ccnt > 0 ? DDI_INTR_CLAIMED : DDI_INTR_UNCLAIMED);
3134 3134 }
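
To make the stride concrete: with n_intr_cnt == 4 and eight I/O completion queues, vector 1 services queues 1 and 5, vector 2 services queues 2 and 6, and so on. A standalone sketch of the forward mapping (the helper name is hypothetical):

    #include <stdint.h>

    /*
     * Vector assignment mirrors the queue_idx % intr_cnt calculation in
     * nvme_create_io_qpair(); nvme_intr() above inverts it by starting
     * at inum and stepping by intr_cnt.
     */
    static uint32_t
    cq_vector(uint32_t queue_idx, uint32_t intr_cnt)
    {
            return (queue_idx % intr_cnt);
    }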
3135 3135
3136 3136 static void
3137 3137 nvme_release_interrupts(nvme_t *nvme)
3138 3138 {
3139 3139 int i;
3140 3140
3141 3141 for (i = 0; i < nvme->n_intr_cnt; i++) {
3142 3142 if (nvme->n_inth[i] == NULL)
3143 3143 break;
3144 3144
3145 3145 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
3146 3146 (void) ddi_intr_block_disable(&nvme->n_inth[i], 1);
3147 3147 else
3148 3148 (void) ddi_intr_disable(nvme->n_inth[i]);
3149 3149
3150 3150 (void) ddi_intr_remove_handler(nvme->n_inth[i]);
3151 3151 (void) ddi_intr_free(nvme->n_inth[i]);
3152 3152 }
3153 3153
3154 3154 kmem_free(nvme->n_inth, nvme->n_inth_sz);
3155 3155 nvme->n_inth = NULL;
3156 3156 nvme->n_inth_sz = 0;
3157 3157
3158 3158 nvme->n_progress &= ~NVME_INTERRUPTS;
3159 3159 }
3160 3160
3161 3161 static int
3162 3162 nvme_setup_interrupts(nvme_t *nvme, int intr_type, int nqpairs)
3163 3163 {
3164 3164 int nintrs, navail, count;
3165 3165 int ret;
3166 3166 int i;
3167 3167
3168 3168 if (nvme->n_intr_types == 0) {
3169 3169 ret = ddi_intr_get_supported_types(nvme->n_dip,
3170 3170 &nvme->n_intr_types);
3171 3171 if (ret != DDI_SUCCESS) {
3172 3172 dev_err(nvme->n_dip, CE_WARN,
3173 3173 	    "!%s: ddi_intr_get_supported_types failed",
3174 3174 __func__);
3175 3175 return (ret);
3176 3176 }
3177 3177 #ifdef __x86
3178 3178 if (get_hwenv() == HW_VMWARE)
3179 3179 nvme->n_intr_types &= ~DDI_INTR_TYPE_MSIX;
3180 3180 #endif
3181 3181 }
3182 3182
3183 3183 if ((nvme->n_intr_types & intr_type) == 0)
3184 3184 return (DDI_FAILURE);
3185 3185
3186 3186 ret = ddi_intr_get_nintrs(nvme->n_dip, intr_type, &nintrs);
3187 3187 if (ret != DDI_SUCCESS) {
3188 3188 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_nintrs failed",
3189 3189 __func__);
3190 3190 return (ret);
3191 3191 }
3192 3192
3193 3193 ret = ddi_intr_get_navail(nvme->n_dip, intr_type, &navail);
3194 3194 if (ret != DDI_SUCCESS) {
3195 3195 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_navail failed",
3196 3196 __func__);
3197 3197 return (ret);
3198 3198 }
3199 3199
3200 3200 /* We want at most one interrupt per queue pair. */
3201 3201 if (navail > nqpairs)
3202 3202 navail = nqpairs;
3203 3203
3204 3204 nvme->n_inth_sz = sizeof (ddi_intr_handle_t) * navail;
3205 3205 nvme->n_inth = kmem_zalloc(nvme->n_inth_sz, KM_SLEEP);
3206 3206
3207 3207 ret = ddi_intr_alloc(nvme->n_dip, nvme->n_inth, intr_type, 0, navail,
3208 3208 &count, 0);
3209 3209 if (ret != DDI_SUCCESS) {
3210 3210 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_alloc failed",
3211 3211 __func__);
3212 3212 goto fail;
3213 3213 }
3214 3214
3215 3215 nvme->n_intr_cnt = count;
3216 3216
3217 3217 ret = ddi_intr_get_pri(nvme->n_inth[0], &nvme->n_intr_pri);
3218 3218 if (ret != DDI_SUCCESS) {
3219 3219 dev_err(nvme->n_dip, CE_WARN, "!%s: ddi_intr_get_pri failed",
3220 3220 __func__);
3221 3221 goto fail;
3222 3222 }
3223 3223
3224 3224 for (i = 0; i < count; i++) {
3225 3225 ret = ddi_intr_add_handler(nvme->n_inth[i], nvme_intr,
3226 3226 (void *)nvme, (void *)(uintptr_t)i);
3227 3227 if (ret != DDI_SUCCESS) {
3228 3228 dev_err(nvme->n_dip, CE_WARN,
3229 3229 "!%s: ddi_intr_add_handler failed", __func__);
3230 3230 goto fail;
3231 3231 }
3232 3232 }
3233 3233
3234 3234 (void) ddi_intr_get_cap(nvme->n_inth[0], &nvme->n_intr_cap);
3235 3235
3236 3236 for (i = 0; i < count; i++) {
3237 3237 if (nvme->n_intr_cap & DDI_INTR_FLAG_BLOCK)
3238 3238 ret = ddi_intr_block_enable(&nvme->n_inth[i], 1);
3239 3239 else
3240 3240 ret = ddi_intr_enable(nvme->n_inth[i]);
3241 3241
3242 3242 if (ret != DDI_SUCCESS) {
3243 3243 dev_err(nvme->n_dip, CE_WARN,
3244 3244 "!%s: enabling interrupt %d failed", __func__, i);
3245 3245 goto fail;
3246 3246 }
3247 3247 }
3248 3248
3249 3249 nvme->n_intr_type = intr_type;
3250 3250
3251 3251 nvme->n_progress |= NVME_INTERRUPTS;
3252 3252
3253 3253 return (DDI_SUCCESS);
3254 3254
3255 3255 fail:
3256 3256 nvme_release_interrupts(nvme);
3257 3257
3258 3258 return (ret);
3259 3259 }
3260 3260
3261 3261 static int
3262 3262 nvme_fm_errcb(dev_info_t *dip, ddi_fm_error_t *fm_error, const void *arg)
3263 3263 {
3264 3264 _NOTE(ARGUNUSED(arg));
3265 3265
3266 3266 pci_ereport_post(dip, fm_error, NULL);
3267 3267 return (fm_error->fme_status);
3268 3268 }
3269 3269
3270 3270 static int
3271 3271 nvme_attach(dev_info_t *dip, ddi_attach_cmd_t cmd)
3272 3272 {
3273 3273 nvme_t *nvme;
3274 3274 int instance;
3275 3275 int nregs;
3276 3276 off_t regsize;
3277 3277 int i;
3278 3278 char name[32];
3279 3279 bd_ops_t ops = nvme_bd_ops;
3280 3280
3281 3281 if (cmd != DDI_ATTACH)
3282 3282 return (DDI_FAILURE);
3283 3283
3284 3284 instance = ddi_get_instance(dip);
3285 3285
3286 3286 if (ddi_soft_state_zalloc(nvme_state, instance) != DDI_SUCCESS)
3287 3287 return (DDI_FAILURE);
3288 3288
3289 3289 nvme = ddi_get_soft_state(nvme_state, instance);
3290 3290 ddi_set_driver_private(dip, nvme);
3291 3291 nvme->n_dip = dip;
3292 3292
3293 3293 mutex_init(&nvme->n_minor.nm_mutex, NULL, MUTEX_DRIVER, NULL);
3294 3294
3295 3295 nvme->n_strict_version = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3296 3296 DDI_PROP_DONTPASS, "strict-version", 1) == 1 ? B_TRUE : B_FALSE;
3297 3297 nvme->n_ignore_unknown_vendor_status = ddi_prop_get_int(DDI_DEV_T_ANY,
3298 3298 dip, DDI_PROP_DONTPASS, "ignore-unknown-vendor-status", 0) == 1 ?
3299 3299 B_TRUE : B_FALSE;
3300 3300 nvme->n_admin_queue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3301 3301 DDI_PROP_DONTPASS, "admin-queue-len", NVME_DEFAULT_ADMIN_QUEUE_LEN);
3302 3302 nvme->n_io_squeue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3303 3303 DDI_PROP_DONTPASS, "io-squeue-len", NVME_DEFAULT_IO_QUEUE_LEN);
3304 3304 /*
3305 3305 * Double up the default for completion queues in case of
3306 3306 * queue sharing.
3307 3307 */
3308 3308 nvme->n_io_cqueue_len = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3309 3309 DDI_PROP_DONTPASS, "io-cqueue-len", 2 * NVME_DEFAULT_IO_QUEUE_LEN);
3310 3310 nvme->n_async_event_limit = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3311 3311 DDI_PROP_DONTPASS, "async-event-limit",
3312 3312 NVME_DEFAULT_ASYNC_EVENT_LIMIT);
3313 3313 nvme->n_write_cache_enabled = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3314 3314 DDI_PROP_DONTPASS, "volatile-write-cache-enable", 1) != 0 ?
3315 3315 B_TRUE : B_FALSE;
3316 3316 nvme->n_min_block_size = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3317 3317 DDI_PROP_DONTPASS, "min-phys-block-size",
3318 3318 NVME_DEFAULT_MIN_BLOCK_SIZE);
3319 3319 nvme->n_submission_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3320 3320 DDI_PROP_DONTPASS, "max-submission-queues", -1);
3321 3321 nvme->n_completion_queues = ddi_prop_get_int(DDI_DEV_T_ANY, dip,
3322 3322 DDI_PROP_DONTPASS, "max-completion-queues", -1);
3323 3323
3324 3324 if (!ISP2(nvme->n_min_block_size) ||
3325 3325 (nvme->n_min_block_size < NVME_DEFAULT_MIN_BLOCK_SIZE)) {
3326 3326 dev_err(dip, CE_WARN, "!min-phys-block-size %s, "
3327 3327 "using default %d", ISP2(nvme->n_min_block_size) ?
3328 3328 "too low" : "not a power of 2",
3329 3329 NVME_DEFAULT_MIN_BLOCK_SIZE);
3330 3330 nvme->n_min_block_size = NVME_DEFAULT_MIN_BLOCK_SIZE;
3331 3331 }
3332 3332
3333 3333 if (nvme->n_submission_queues != -1 &&
3334 3334 (nvme->n_submission_queues < 1 ||
3335 3335 nvme->n_submission_queues > UINT16_MAX)) {
3336 3336 		dev_err(dip, CE_WARN, "!\"max-submission-queues\"=%d is not "
3337 3337 "valid. Must be [1..%d]", nvme->n_submission_queues,
3338 3338 UINT16_MAX);
3339 3339 nvme->n_submission_queues = -1;
3340 3340 }
3341 3341
3342 3342 if (nvme->n_completion_queues != -1 &&
3343 3343 (nvme->n_completion_queues < 1 ||
3344 3344 nvme->n_completion_queues > UINT16_MAX)) {
3345 3345 		dev_err(dip, CE_WARN, "!\"max-completion-queues\"=%d is not "
3346 3346 "valid. Must be [1..%d]", nvme->n_completion_queues,
3347 3347 UINT16_MAX);
3348 3348 nvme->n_completion_queues = -1;
3349 3349 }
3350 3350
3351 3351 if (nvme->n_admin_queue_len < NVME_MIN_ADMIN_QUEUE_LEN)
3352 3352 nvme->n_admin_queue_len = NVME_MIN_ADMIN_QUEUE_LEN;
3353 3353 else if (nvme->n_admin_queue_len > NVME_MAX_ADMIN_QUEUE_LEN)
3354 3354 nvme->n_admin_queue_len = NVME_MAX_ADMIN_QUEUE_LEN;
3355 3355
3356 3356 if (nvme->n_io_squeue_len < NVME_MIN_IO_QUEUE_LEN)
3357 3357 nvme->n_io_squeue_len = NVME_MIN_IO_QUEUE_LEN;
3358 3358 if (nvme->n_io_cqueue_len < NVME_MIN_IO_QUEUE_LEN)
3359 3359 nvme->n_io_cqueue_len = NVME_MIN_IO_QUEUE_LEN;
3360 3360
3361 3361 if (nvme->n_async_event_limit < 1)
3362 3362 nvme->n_async_event_limit = NVME_DEFAULT_ASYNC_EVENT_LIMIT;
3363 3363
3364 3364 nvme->n_reg_acc_attr = nvme_reg_acc_attr;
3365 3365 nvme->n_queue_dma_attr = nvme_queue_dma_attr;
3366 3366 nvme->n_prp_dma_attr = nvme_prp_dma_attr;
3367 3367 nvme->n_sgl_dma_attr = nvme_sgl_dma_attr;
3368 3368
3369 3369 /*
3370 3370 * Setup FMA support.
3371 3371 */
3372 3372 nvme->n_fm_cap = ddi_getprop(DDI_DEV_T_ANY, dip,
3373 3373 DDI_PROP_CANSLEEP | DDI_PROP_DONTPASS, "fm-capable",
3374 3374 DDI_FM_EREPORT_CAPABLE | DDI_FM_ACCCHK_CAPABLE |
3375 3375 DDI_FM_DMACHK_CAPABLE | DDI_FM_ERRCB_CAPABLE);
3376 3376
3377 3377 ddi_fm_init(dip, &nvme->n_fm_cap, &nvme->n_fm_ibc);
3378 3378
3379 3379 if (nvme->n_fm_cap) {
3380 3380 if (nvme->n_fm_cap & DDI_FM_ACCCHK_CAPABLE)
3381 3381 nvme->n_reg_acc_attr.devacc_attr_access =
3382 3382 DDI_FLAGERR_ACC;
3383 3383
3384 3384 if (nvme->n_fm_cap & DDI_FM_DMACHK_CAPABLE) {
3385 3385 nvme->n_prp_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3386 3386 nvme->n_sgl_dma_attr.dma_attr_flags |= DDI_DMA_FLAGERR;
3387 3387 }
3388 3388
3389 3389 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
3390 3390 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3391 3391 pci_ereport_setup(dip);
3392 3392
3393 3393 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3394 3394 ddi_fm_handler_register(dip, nvme_fm_errcb,
3395 3395 (void *)nvme);
3396 3396 }
3397 3397
3398 3398 nvme->n_progress |= NVME_FMA_INIT;
3399 3399
3400 3400 /*
3401 3401 * The spec defines several register sets. Only the controller
3402 3402 * registers (set 1) are currently used.
3403 3403 */
3404 3404 if (ddi_dev_nregs(dip, &nregs) == DDI_FAILURE ||
3405 3405 nregs < 2 ||
3406 3406 	    ddi_dev_regsize(dip, 1, &regsize) == DDI_FAILURE)
3407 3407 goto fail;
3408 3408
3409 3409 if (ddi_regs_map_setup(dip, 1, &nvme->n_regs, 0, regsize,
3410 3410 &nvme->n_reg_acc_attr, &nvme->n_regh) != DDI_SUCCESS) {
3411 3411 dev_err(dip, CE_WARN, "!failed to map regset 1");
3412 3412 goto fail;
3413 3413 }
3414 3414
3415 3415 nvme->n_progress |= NVME_REGS_MAPPED;
3416 3416
3417 3417 /*
3418 3418 * Create PRP DMA cache
3419 3419 */
3420 3420 (void) snprintf(name, sizeof (name), "%s%d_prp_cache",
3421 3421 ddi_driver_name(dip), ddi_get_instance(dip));
3422 3422 nvme->n_prp_cache = kmem_cache_create(name, sizeof (nvme_dma_t),
3423 3423 0, nvme_prp_dma_constructor, nvme_prp_dma_destructor,
3424 3424 NULL, (void *)nvme, NULL, 0);
3425 3425
3426 3426 if (nvme_init(nvme) != DDI_SUCCESS)
3427 3427 goto fail;
3428 3428
3429 3429 if (!nvme->n_idctl->id_oncs.on_dset_mgmt)
3430 3430 ops.o_free_space = NULL;
3431 3431
3432 3432 /*
3433 3433 * Initialize the driver with the UFM subsystem
3434 3434 */
3435 3435 if (ddi_ufm_init(dip, DDI_UFM_CURRENT_VERSION, &nvme_ufm_ops,
3436 3436 &nvme->n_ufmh, nvme) != 0) {
3437 3437 dev_err(dip, CE_WARN, "!failed to initialize UFM subsystem");
3438 3438 goto fail;
3439 3439 }
3440 3440 mutex_init(&nvme->n_fwslot_mutex, NULL, MUTEX_DRIVER, NULL);
3441 3441 ddi_ufm_update(nvme->n_ufmh);
3442 3442 nvme->n_progress |= NVME_UFM_INIT;
3443 3443
3444 3444 /*
3445 3445 * Attach the blkdev driver for each namespace.
3446 3446 */
3447 3447 for (i = 0; i != nvme->n_namespace_count; i++) {
3448 3448 if (ddi_create_minor_node(nvme->n_dip, nvme->n_ns[i].ns_name,
3449 3449 S_IFCHR, NVME_MINOR(ddi_get_instance(nvme->n_dip), i + 1),
3450 3450 DDI_NT_NVME_ATTACHMENT_POINT, 0) != DDI_SUCCESS) {
3451 3451 dev_err(dip, CE_WARN,
3452 3452 "!failed to create minor node for namespace %d", i);
3453 3453 goto fail;
3454 3454 }
3455 3455
3456 3456 if (nvme->n_ns[i].ns_ignore)
3457 3457 continue;
3458 3458
3459 3459 nvme->n_ns[i].ns_bd_hdl = bd_alloc_handle(&nvme->n_ns[i],
3460 3460 &ops, &nvme->n_prp_dma_attr, KM_SLEEP);
3461 3461
3462 3462 if (nvme->n_ns[i].ns_bd_hdl == NULL) {
3463 3463 dev_err(dip, CE_WARN,
3464 3464 "!failed to get blkdev handle for namespace %d", i);
3465 3465 goto fail;
3466 3466 }
3467 3467
3468 3468 if (bd_attach_handle(dip, nvme->n_ns[i].ns_bd_hdl)
3469 3469 != DDI_SUCCESS) {
3470 3470 dev_err(dip, CE_WARN,
3471 3471 "!failed to attach blkdev handle for namespace %d",
3472 3472 i);
3473 3473 goto fail;
3474 3474 }
3475 3475 }
3476 3476
3477 3477 if (ddi_create_minor_node(dip, "devctl", S_IFCHR,
3478 3478 NVME_MINOR(ddi_get_instance(dip), 0), DDI_NT_NVME_NEXUS, 0)
3479 3479 != DDI_SUCCESS) {
3480 3480 dev_err(dip, CE_WARN, "nvme_attach: "
3481 3481 "cannot create devctl minor node");
3482 3482 goto fail;
3483 3483 }
3484 3484
3485 3485 return (DDI_SUCCESS);
3486 3486
3487 3487 fail:
3488 3488 /* attach successful anyway so that FMA can retire the device */
3489 3489 if (nvme->n_dead)
3490 3490 return (DDI_SUCCESS);
3491 3491
3492 3492 (void) nvme_detach(dip, DDI_DETACH);
3493 3493
3494 3494 return (DDI_FAILURE);
3495 3495 }
3496 3496
3497 3497 static int
3498 3498 nvme_detach(dev_info_t *dip, ddi_detach_cmd_t cmd)
3499 3499 {
3500 3500 int instance, i;
3501 3501 nvme_t *nvme;
3502 3502
3503 3503 if (cmd != DDI_DETACH)
3504 3504 return (DDI_FAILURE);
3505 3505
3506 3506 instance = ddi_get_instance(dip);
3507 3507
3508 3508 nvme = ddi_get_soft_state(nvme_state, instance);
3509 3509
3510 3510 if (nvme == NULL)
3511 3511 return (DDI_FAILURE);
3512 3512
3513 3513 ddi_remove_minor_node(dip, "devctl");
3514 3514 mutex_destroy(&nvme->n_minor.nm_mutex);
3515 3515
3516 3516 if (nvme->n_ns) {
3517 3517 for (i = 0; i != nvme->n_namespace_count; i++) {
3518 3518 ddi_remove_minor_node(dip, nvme->n_ns[i].ns_name);
3519 3519 mutex_destroy(&nvme->n_ns[i].ns_minor.nm_mutex);
3520 3520
3521 3521 if (nvme->n_ns[i].ns_bd_hdl) {
3522 3522 (void) bd_detach_handle(
3523 3523 nvme->n_ns[i].ns_bd_hdl);
3524 3524 bd_free_handle(nvme->n_ns[i].ns_bd_hdl);
3525 3525 }
3526 3526
3527 3527 if (nvme->n_ns[i].ns_idns)
3528 3528 kmem_free(nvme->n_ns[i].ns_idns,
3529 3529 sizeof (nvme_identify_nsid_t));
3530 3530 if (nvme->n_ns[i].ns_devid)
3531 3531 strfree(nvme->n_ns[i].ns_devid);
3532 3532 }
3533 3533
3534 3534 kmem_free(nvme->n_ns, sizeof (nvme_namespace_t) *
3535 3535 nvme->n_namespace_count);
3536 3536 }
3537 3537 if (nvme->n_progress & NVME_UFM_INIT) {
3538 3538 ddi_ufm_fini(nvme->n_ufmh);
3539 3539 mutex_destroy(&nvme->n_fwslot_mutex);
3540 3540 }
3541 3541
3542 3542 if (nvme->n_progress & NVME_INTERRUPTS)
3543 3543 nvme_release_interrupts(nvme);
3544 3544
3545 3545 for (i = 0; i < nvme->n_cq_count; i++) {
3546 3546 if (nvme->n_cq[i]->ncq_cmd_taskq != NULL)
3547 3547 taskq_wait(nvme->n_cq[i]->ncq_cmd_taskq);
3548 3548 }
3549 3549
3550 3550 if (nvme->n_ioq_count > 0) {
3551 3551 for (i = 1; i != nvme->n_ioq_count + 1; i++) {
3552 3552 if (nvme->n_ioq[i] != NULL) {
3553 3553 /* TODO: send destroy queue commands */
3554 3554 nvme_free_qpair(nvme->n_ioq[i]);
3555 3555 }
3556 3556 }
3557 3557
3558 3558 kmem_free(nvme->n_ioq, sizeof (nvme_qpair_t *) *
3559 3559 (nvme->n_ioq_count + 1));
3560 3560 }
3561 3561
3562 3562 if (nvme->n_prp_cache != NULL) {
3563 3563 kmem_cache_destroy(nvme->n_prp_cache);
3564 3564 }
3565 3565
3566 3566 if (nvme->n_progress & NVME_REGS_MAPPED) {
3567 3567 nvme_shutdown(nvme, NVME_CC_SHN_NORMAL, B_FALSE);
3568 3568 (void) nvme_reset(nvme, B_FALSE);
3569 3569 }
3570 3570
3571 3571 if (nvme->n_progress & NVME_CTRL_LIMITS)
3572 3572 sema_destroy(&nvme->n_abort_sema);
3573 3573
3574 3574 if (nvme->n_progress & NVME_ADMIN_QUEUE)
3575 3575 nvme_free_qpair(nvme->n_adminq);
3576 3576
3577 3577 if (nvme->n_cq_count > 0) {
3578 3578 nvme_destroy_cq_array(nvme, 0);
3579 3579 nvme->n_cq = NULL;
3580 3580 nvme->n_cq_count = 0;
3581 3581 }
3582 3582
3583 3583 if (nvme->n_idctl)
3584 3584 kmem_free(nvme->n_idctl, NVME_IDENTIFY_BUFSIZE);
3585 3585
3586 3586 if (nvme->n_progress & NVME_REGS_MAPPED)
3587 3587 ddi_regs_map_free(&nvme->n_regh);
3588 3588
3589 3589 if (nvme->n_progress & NVME_FMA_INIT) {
3590 3590 if (DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3591 3591 ddi_fm_handler_unregister(nvme->n_dip);
3592 3592
3593 3593 if (DDI_FM_EREPORT_CAP(nvme->n_fm_cap) ||
3594 3594 DDI_FM_ERRCB_CAP(nvme->n_fm_cap))
3595 3595 pci_ereport_teardown(nvme->n_dip);
3596 3596
3597 3597 ddi_fm_fini(nvme->n_dip);
3598 3598 }
3599 3599
3600 3600 if (nvme->n_vendor != NULL)
3601 3601 strfree(nvme->n_vendor);
3602 3602
3603 3603 if (nvme->n_product != NULL)
3604 3604 strfree(nvme->n_product);
3605 3605
3606 3606 ddi_soft_state_free(nvme_state, instance);
3607 3607
3608 3608 return (DDI_SUCCESS);
3609 3609 }
3610 3610
3611 3611 static int
3612 3612 nvme_quiesce(dev_info_t *dip)
3613 3613 {
3614 3614 int instance;
3615 3615 nvme_t *nvme;
3616 3616
3617 3617 instance = ddi_get_instance(dip);
3618 3618
3619 3619 nvme = ddi_get_soft_state(nvme_state, instance);
3620 3620
3621 3621 if (nvme == NULL)
3622 3622 return (DDI_FAILURE);
3623 3623
3624 3624 nvme_shutdown(nvme, NVME_CC_SHN_ABRUPT, B_TRUE);
3625 3625
3626 3626 (void) nvme_reset(nvme, B_TRUE);
3627 3627
3628 3628 return (DDI_FAILURE);
3629 3629 }
3630 3630
3631 3631 static int
3632 3632 nvme_fill_prp(nvme_cmd_t *cmd, bd_xfer_t *xfer)
3633 3633 {
3634 3634 nvme_t *nvme = cmd->nc_nvme;
3635 3635 int nprp_page, nprp;
3636 3636 uint64_t *prp;
3637 3637
3638 3638 if (xfer->x_ndmac == 0)
3639 3639 return (DDI_FAILURE);
3640 3640
3641 3641 cmd->nc_sqe.sqe_dptr.d_prp[0] = xfer->x_dmac.dmac_laddress;
3642 3642
3643 3643 if (xfer->x_ndmac == 1) {
3644 3644 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
3645 3645 return (DDI_SUCCESS);
3646 3646 } else if (xfer->x_ndmac == 2) {
3647 3647 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
3648 3648 cmd->nc_sqe.sqe_dptr.d_prp[1] = xfer->x_dmac.dmac_laddress;
3649 3649 return (DDI_SUCCESS);
3650 3650 }
3651 3651
3652 3652 xfer->x_ndmac--;
3653 3653
3654 3654 nprp_page = nvme->n_pagesize / sizeof (uint64_t);
3655 3655 ASSERT(nprp_page > 0);
3656 3656 nprp = (xfer->x_ndmac + nprp_page - 1) / nprp_page;
3657 3657
3658 3658 /*
3659 3659 * We currently don't support chained PRPs and set up our DMA
3660 3660 * attributes to reflect that. If we still get an I/O request
3661 3661 * that needs a chained PRP something is very wrong.
3662 3662 */
3663 3663 VERIFY(nprp == 1);
3664 3664
3665 3665 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, KM_SLEEP);
3666 3666 bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);
3667 3667
3668 3668 cmd->nc_sqe.sqe_dptr.d_prp[1] = cmd->nc_dma->nd_cookie.dmac_laddress;
3669 3669
3670 3670 /*LINTED: E_PTR_BAD_CAST_ALIGN*/
3671 3671 for (prp = (uint64_t *)cmd->nc_dma->nd_memp;
3672 3672 xfer->x_ndmac > 0;
3673 3673 prp++, xfer->x_ndmac--) {
3674 3674 ddi_dma_nextcookie(xfer->x_dmah, &xfer->x_dmac);
3675 3675 *prp = xfer->x_dmac.dmac_laddress;
3676 3676 }
3677 3677
3678 3678 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
3679 3679 DDI_DMA_SYNC_FORDEV);
3680 3680 return (DDI_SUCCESS);
3681 3681 }
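
To put numbers to the PRP math above: with a 4k page, nprp_page is 4096 / 8 == 512 entries, so after the first cookie lands in prp[0] a transfer may carry up to 512 more cookies before nprp would exceed 1 and trip the VERIFY. A standalone sketch of the same calculation (hypothetical helper, assuming only the arithmetic shown above):

    #include <stddef.h>
    #include <stdint.h>

    /*
     * PRP list pages needed for the cookies remaining after prp[0],
     * mirroring nvme_fill_prp(); must come out to 1 for this driver.
     */
    static uint32_t
    nprp_pages(uint32_t ndmac, size_t pagesize)
    {
            uint32_t nprp_page = pagesize / sizeof (uint64_t); /* 512 at 4k */

            ndmac--;        /* first cookie was placed in prp[0] */
            return ((ndmac + nprp_page - 1) / nprp_page);
    }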
3682 3682
3683 3683 /*
3684 3684 * The maximum number of requests supported for a deallocate request is
3685 3685 * NVME_DSET_MGMT_MAX_RANGES (256) -- this is from the NVMe 1.1 spec (and
3686 3686 * unchanged through at least 1.4a). The definition of nvme_range_t is also
3687 3687 * from the NVMe 1.1 spec. Together, the result is that all of the ranges for
3688 3688 * a deallocate request will fit into the smallest supported namespace page
3689 3689 * (4k).
3690 3690 */
3691 3691 CTASSERT(sizeof (nvme_range_t) * NVME_DSET_MGMT_MAX_RANGES == 4096);
3692 3692
3693 3693 static int
3694 3694 nvme_fill_ranges(nvme_cmd_t *cmd, bd_xfer_t *xfer, uint64_t blocksize,
3695 3695 int allocflag)
3696 3696 {
3697 3697 const dkioc_free_list_t *dfl = xfer->x_dfl;
3698 3698 const dkioc_free_list_ext_t *exts = dfl->dfl_exts;
3699 3699 nvme_t *nvme = cmd->nc_nvme;
3700 3700 nvme_range_t *ranges = NULL;
3701 3701 uint_t i;
3702 3702
3703 3703 /*
3704 3704 	 * The number of ranges in the request is zero-based (that is,
3705 3705 * word10 == 0 -> 1 range, word10 == 1 -> 2 ranges, ...,
3706 3706 * word10 == 255 -> 256 ranges). Therefore the allowed values are
3707 3707 * [1..NVME_DSET_MGMT_MAX_RANGES]. If blkdev gives us a bad request,
3708 3708 * we either provided bad info in nvme_bd_driveinfo() or there is a bug
3709 3709 * in blkdev.
3710 3710 */
3711 3711 VERIFY3U(dfl->dfl_num_exts, >, 0);
3712 3712 VERIFY3U(dfl->dfl_num_exts, <=, NVME_DSET_MGMT_MAX_RANGES);
3713 3713 cmd->nc_sqe.sqe_cdw10 = (dfl->dfl_num_exts - 1) & 0xff;
3714 3714
3715 3715 cmd->nc_sqe.sqe_cdw11 = NVME_DSET_MGMT_ATTR_DEALLOCATE;
3716 3716
3717 3717 cmd->nc_dma = kmem_cache_alloc(nvme->n_prp_cache, allocflag);
3718 3718 if (cmd->nc_dma == NULL)
3719 3719 return (DDI_FAILURE);
3720 3720
3721 3721 bzero(cmd->nc_dma->nd_memp, cmd->nc_dma->nd_len);
3722 3722 ranges = (nvme_range_t *)cmd->nc_dma->nd_memp;
3723 3723
3724 3724 cmd->nc_sqe.sqe_dptr.d_prp[0] = cmd->nc_dma->nd_cookie.dmac_laddress;
3725 3725 cmd->nc_sqe.sqe_dptr.d_prp[1] = 0;
3726 3726
3727 3727 for (i = 0; i < dfl->dfl_num_exts; i++) {
3728 3728 uint64_t lba, len;
3729 3729
3730 3730 lba = (dfl->dfl_offset + exts[i].dfle_start) / blocksize;
3731 3731 len = exts[i].dfle_length / blocksize;
3732 3732
3733 3733 VERIFY3U(len, <=, UINT32_MAX);
3734 3734
3735 3735 /* No context attributes for a deallocate request */
3736 3736 ranges[i].nr_ctxattr = 0;
3737 3737 ranges[i].nr_len = len;
3738 3738 ranges[i].nr_lba = lba;
3739 3739 }
3740 3740
3741 3741 (void) ddi_dma_sync(cmd->nc_dma->nd_dmah, 0, cmd->nc_dma->nd_len,
3742 3742 DDI_DMA_SYNC_FORDEV);
3743 3743
3744 3744 return (DDI_SUCCESS);
3745 3745 }
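
The CTASSERT above pins sizeof (nvme_range_t) at 16 bytes (16 * 256 == 4096). The zero-based range count that nvme_fill_ranges() stores in cdw10 is easy to get wrong in either direction; a minimal encode/decode sketch (hypothetical helpers, assuming only the encoding described in the comment above):

    #include <stdint.h>

    /* Encode 1..256 ranges as the zero-based NR field of cdw10. */
    static uint32_t
    dsm_encode_nr(uint32_t num_ranges)
    {
            /* caller guarantees 1 <= num_ranges <= 256 */
            return ((num_ranges - 1) & 0xff);
    }

    /* Decode the NR field back into a range count. */
    static uint32_t
    dsm_decode_nr(uint32_t cdw10)
    {
            return ((cdw10 & 0xff) + 1);
    }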
3746 3746
3747 3747 static nvme_cmd_t *
3748 3748 nvme_create_nvm_cmd(nvme_namespace_t *ns, uint8_t opc, bd_xfer_t *xfer)
3749 3749 {
3750 3750 nvme_t *nvme = ns->ns_nvme;
3751 3751 nvme_cmd_t *cmd;
3752 3752 int allocflag;
3753 3753
3754 3754 /*
3755 3755 * Blkdev only sets BD_XFER_POLL when dumping, so don't sleep.
3756 3756 */
3757 3757 allocflag = (xfer->x_flags & BD_XFER_POLL) ? KM_NOSLEEP : KM_SLEEP;
3758 3758 cmd = nvme_alloc_cmd(nvme, allocflag);
3759 3759
3760 3760 if (cmd == NULL)
3761 3761 return (NULL);
3762 3762
3763 3763 cmd->nc_sqe.sqe_opc = opc;
3764 3764 cmd->nc_callback = nvme_bd_xfer_done;
3765 3765 cmd->nc_xfer = xfer;
3766 3766
3767 3767 switch (opc) {
3768 3768 case NVME_OPC_NVM_WRITE:
3769 3769 case NVME_OPC_NVM_READ:
3770 3770 VERIFY(xfer->x_nblks <= 0x10000);
3771 3771
3772 3772 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3773 3773
3774 3774 cmd->nc_sqe.sqe_cdw10 = xfer->x_blkno & 0xffffffffu;
3775 3775 cmd->nc_sqe.sqe_cdw11 = (xfer->x_blkno >> 32);
3776 3776 cmd->nc_sqe.sqe_cdw12 = (uint16_t)(xfer->x_nblks - 1);
3777 3777
3778 3778 if (nvme_fill_prp(cmd, xfer) != DDI_SUCCESS)
3779 3779 goto fail;
3780 3780 break;
3781 3781
3782 3782 case NVME_OPC_NVM_FLUSH:
3783 3783 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3784 3784 break;
3785 3785
3786 3786 case NVME_OPC_NVM_DSET_MGMT:
3787 3787 cmd->nc_sqe.sqe_nsid = ns->ns_id;
3788 3788
3789 3789 if (nvme_fill_ranges(cmd, xfer,
3790 3790 (uint64_t)ns->ns_block_size, allocflag) != DDI_SUCCESS)
3791 3791 goto fail;
3792 3792 break;
3793 3793
3794 3794 default:
3795 3795 goto fail;
3796 3796 }
3797 3797
3798 3798 return (cmd);
3799 3799
3800 3800 fail:
3801 3801 nvme_free_cmd(cmd);
3802 3802 return (NULL);
3803 3803 }
3804 3804
3805 3805 static void
3806 3806 nvme_bd_xfer_done(void *arg)
3807 3807 {
3808 3808 nvme_cmd_t *cmd = arg;
3809 3809 bd_xfer_t *xfer = cmd->nc_xfer;
3810 3810 int error = 0;
3811 3811
3812 3812 error = nvme_check_cmd_status(cmd);
3813 3813 nvme_free_cmd(cmd);
3814 3814
3815 3815 bd_xfer_done(xfer, error);
3816 3816 }
3817 3817
3818 3818 static void
3819 3819 nvme_bd_driveinfo(void *arg, bd_drive_t *drive)
3820 3820 {
3821 3821 nvme_namespace_t *ns = arg;
3822 3822 nvme_t *nvme = ns->ns_nvme;
3823 3823 uint_t ns_count = MAX(1, nvme->n_namespaces_attachable);
3824 3824
3825 3825 /*
3826 3826 * Set the blkdev qcount to the number of submission queues.
3827 3827 * It will then create one waitq/runq pair for each submission
3828 3828 * queue and spread I/O requests across the queues.
3829 3829 */
3830 3830 drive->d_qcount = nvme->n_ioq_count;
3831 3831
3832 3832 /*
3833 3833 * I/O activity to individual namespaces is distributed across
3834 3834 * each of the d_qcount blkdev queues (which has been set to
3835 3835 * the number of nvme submission queues). d_qsize is the number
3836 3836 * of submitted and not completed I/Os within each queue that blkdev
3837 3837 * will allow before it starts holding them in the waitq.
3838 3838 *
3839 3839 	 * Each namespace will create a child blkdev instance; for each one
3840 3840 	 * we try to set the d_qsize so that each namespace gets an
3841 3841 	 * equal portion of the submission queue.
3842 3842 	 *
3843 3843 	 * If, after the nvme drive is instantiated, n_namespaces_attachable
3844 3844 	 * changes and a namespace is attached, it could calculate a
3845 3845 * different d_qsize. It may even be that the sum of the d_qsizes is
3846 3846 * now beyond the submission queue size. Should that be the case
3847 3847 * and the I/O rate is such that blkdev attempts to submit more
3848 3848 * I/Os than the size of the submission queue, the excess I/Os
3849 3849 * will be held behind the semaphore nq_sema.
3850 3850 */
3851 3851 drive->d_qsize = nvme->n_io_squeue_len / ns_count;
3852 3852
3853 3853 /*
3854 3854 * Don't let the queue size drop below the minimum, though.
3855 3855 */
3856 3856 drive->d_qsize = MAX(drive->d_qsize, NVME_MIN_IO_QUEUE_LEN);
3857 3857
3858 3858 /*
3859 3859 * d_maxxfer is not set, which means the value is taken from the DMA
3860 3860 * attributes specified to bd_alloc_handle.
3861 3861 */
3862 3862
3863 3863 drive->d_removable = B_FALSE;
3864 3864 drive->d_hotpluggable = B_FALSE;
3865 3865
3866 3866 bcopy(ns->ns_eui64, drive->d_eui64, sizeof (drive->d_eui64));
3867 3867 drive->d_target = ns->ns_id;
3868 3868 drive->d_lun = 0;
3869 3869
3870 3870 drive->d_model = nvme->n_idctl->id_model;
3871 3871 drive->d_model_len = sizeof (nvme->n_idctl->id_model);
3872 3872 drive->d_vendor = nvme->n_vendor;
3873 3873 drive->d_vendor_len = strlen(nvme->n_vendor);
3874 3874 drive->d_product = nvme->n_product;
3875 3875 drive->d_product_len = strlen(nvme->n_product);
3876 3876 drive->d_serial = nvme->n_idctl->id_serial;
3877 3877 drive->d_serial_len = sizeof (nvme->n_idctl->id_serial);
3878 3878 drive->d_revision = nvme->n_idctl->id_fwrev;
3879 3879 drive->d_revision_len = sizeof (nvme->n_idctl->id_fwrev);
3880 3880
3881 3881 /*
3882 3882 	 * If we support the dataset management command, the only restriction
3883 3883 	 * on a discard request is the maximum number of ranges (segments)
3884 3884 	 * in a single request.
3885 3885 */
3886 3886 if (nvme->n_idctl->id_oncs.on_dset_mgmt)
3887 3887 drive->d_max_free_seg = NVME_DSET_MGMT_MAX_RANGES;
3888 3888 }
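
As a worked example of the split above: assuming, for illustration, an io-squeue-len of 1024 and four attachable namespaces, each blkdev child gets d_qsize = 1024 / 4 = 256. A standalone sketch of the division with its floor (hypothetical helper; the real lower bound is NVME_MIN_IO_QUEUE_LEN):

    #include <stdint.h>

    /* Per-namespace share of the submission queue, with a lower bound. */
    static uint32_t
    ns_qsize(uint32_t squeue_len, uint32_t ns_count, uint32_t min_qlen)
    {
            uint32_t qsize = squeue_len / (ns_count > 0 ? ns_count : 1);

            return (qsize > min_qlen ? qsize : min_qlen);
    }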
3889 3889
3890 3890 static int
3891 3891 nvme_bd_mediainfo(void *arg, bd_media_t *media)
3892 3892 {
3893 3893 nvme_namespace_t *ns = arg;
3894 3894
3895 3895 media->m_nblks = ns->ns_block_count;
3896 3896 media->m_blksize = ns->ns_block_size;
3897 3897 media->m_readonly = B_FALSE;
3898 3898 media->m_solidstate = B_TRUE;
3899 3899
3900 3900 media->m_pblksize = ns->ns_best_block_size;
3901 3901
3902 3902 return (0);
3903 3903 }
3904 3904
3905 3905 static int
3906 3906 nvme_bd_cmd(nvme_namespace_t *ns, bd_xfer_t *xfer, uint8_t opc)
3907 3907 {
3908 3908 nvme_t *nvme = ns->ns_nvme;
3909 3909 nvme_cmd_t *cmd;
3910 3910 nvme_qpair_t *ioq;
3911 3911 boolean_t poll;
3912 3912 int ret;
3913 3913
3914 3914 if (nvme->n_dead)
3915 3915 return (EIO);
3916 3916
3917 3917 cmd = nvme_create_nvm_cmd(ns, opc, xfer);
3918 3918 if (cmd == NULL)
3919 3919 return (ENOMEM);
3920 3920
3921 3921 cmd->nc_sqid = xfer->x_qnum + 1;
3922 3922 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
3923 3923 ioq = nvme->n_ioq[cmd->nc_sqid];
3924 3924
3925 3925 /*
3926 3926 * Get the polling flag before submitting the command. The command may
3927 3927 * complete immediately after it was submitted, which means we must
3928 3928 * treat both cmd and xfer as if they have been freed already.
3929 3929 */
3930 3930 poll = (xfer->x_flags & BD_XFER_POLL) != 0;
3931 3931
3932 3932 ret = nvme_submit_io_cmd(ioq, cmd);
3933 3933
3934 3934 if (ret != 0)
3935 3935 return (ret);
3936 3936
3937 3937 if (!poll)
3938 3938 return (0);
3939 3939
3940 3940 do {
3941 3941 cmd = nvme_retrieve_cmd(nvme, ioq);
3942 3942 if (cmd != NULL)
3943 3943 cmd->nc_callback(cmd);
3944 3944 else
3945 3945 drv_usecwait(10);
3946 3946 } while (ioq->nq_active_cmds != 0);
3947 3947
3948 3948 return (0);
3949 3949 }
3950 3950
3951 3951 static int
3952 3952 nvme_bd_read(void *arg, bd_xfer_t *xfer)
3953 3953 {
3954 3954 nvme_namespace_t *ns = arg;
3955 3955
3956 3956 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_READ));
3957 3957 }
3958 3958
3959 3959 static int
3960 3960 nvme_bd_write(void *arg, bd_xfer_t *xfer)
3961 3961 {
3962 3962 nvme_namespace_t *ns = arg;
3963 3963
3964 3964 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_WRITE));
3965 3965 }
3966 3966
3967 3967 static int
3968 3968 nvme_bd_sync(void *arg, bd_xfer_t *xfer)
3969 3969 {
3970 3970 nvme_namespace_t *ns = arg;
3971 3971
3972 3972 if (ns->ns_nvme->n_dead)
3973 3973 return (EIO);
3974 3974
3975 3975 /*
3976 3976 	 * If the volatile write cache is not present FLUSH is unsupported,
3977 3977 	 * and if present but disabled it is a no-op, so shortcut both cases.
3978 3978 */
3979 3979 if (!ns->ns_nvme->n_write_cache_present) {
3980 3980 bd_xfer_done(xfer, ENOTSUP);
3981 3981 return (0);
3982 3982 }
3983 3983
3984 3984 if (!ns->ns_nvme->n_write_cache_enabled) {
3985 3985 bd_xfer_done(xfer, 0);
3986 3986 return (0);
3987 3987 }
3988 3988
3989 3989 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_FLUSH));
3990 3990 }
3991 3991
3992 3992 static int
3993 3993 nvme_bd_devid(void *arg, dev_info_t *devinfo, ddi_devid_t *devid)
3994 3994 {
3995 3995 nvme_namespace_t *ns = arg;
3996 3996
3997 3997 /*LINTED: E_BAD_PTR_CAST_ALIGN*/
3998 3998 if (*(uint64_t *)ns->ns_eui64 != 0) {
3999 3999 return (ddi_devid_init(devinfo, DEVID_SCSI3_WWN,
4000 4000 sizeof (ns->ns_eui64), ns->ns_eui64, devid));
4001 4001 } else {
4002 4002 return (ddi_devid_init(devinfo, DEVID_ENCAP,
4003 4003 strlen(ns->ns_devid), ns->ns_devid, devid));
4004 4004 }
4005 4005 }
4006 4006
4007 4007 static int
4008 4008 nvme_bd_free_space(void *arg, bd_xfer_t *xfer)
4009 4009 {
4010 4010 nvme_namespace_t *ns = arg;
4011 4011
4012 4012 if (xfer->x_dfl == NULL)
4013 4013 return (EINVAL);
4014 4014
4015 4015 if (!ns->ns_nvme->n_idctl->id_oncs.on_dset_mgmt)
4016 4016 return (ENOTSUP);
4017 4017
4018 4018 return (nvme_bd_cmd(ns, xfer, NVME_OPC_NVM_DSET_MGMT));
4019 4019 }
4020 4020
4021 4021 static int
4022 4022 nvme_open(dev_t *devp, int flag, int otyp, cred_t *cred_p)
4023 4023 {
4024 4024 #ifndef __lock_lint
4025 4025 _NOTE(ARGUNUSED(cred_p));
4026 4026 #endif
4027 4027 minor_t minor = getminor(*devp);
4028 4028 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
4029 4029 int nsid = NVME_MINOR_NSID(minor);
4030 4030 nvme_minor_state_t *nm;
4031 4031 int rv = 0;
4032 4032
4033 4033 if (otyp != OTYP_CHR)
4034 4034 return (EINVAL);
4035 4035
4036 4036 if (nvme == NULL)
4037 4037 return (ENXIO);
4038 4038
4039 4039 if (nsid > nvme->n_namespace_count)
4040 4040 return (ENXIO);
4041 4041
4042 4042 if (nvme->n_dead)
4043 4043 return (EIO);
4044 4044
4045 4045 nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;
4046 4046
4047 4047 mutex_enter(&nm->nm_mutex);
4048 4048 if (nm->nm_oexcl) {
4049 4049 rv = EBUSY;
4050 4050 goto out;
4051 4051 }
4052 4052
4053 4053 if (flag & FEXCL) {
4054 4054 if (nm->nm_ocnt != 0) {
4055 4055 rv = EBUSY;
4056 4056 goto out;
4057 4057 }
4058 4058 nm->nm_oexcl = B_TRUE;
4059 4059 }
4060 4060
4061 4061 nm->nm_ocnt++;
4062 4062
4063 4063 out:
4064 4064 mutex_exit(&nm->nm_mutex);
4065 4065 return (rv);
4066 4066
4067 4067 }
4068 4068
4069 4069 static int
4070 4070 nvme_close(dev_t dev, int flag, int otyp, cred_t *cred_p)
4071 4071 {
4072 4072 #ifndef __lock_lint
4073 4073 _NOTE(ARGUNUSED(cred_p));
4074 4074 _NOTE(ARGUNUSED(flag));
4075 4075 #endif
4076 4076 minor_t minor = getminor(dev);
4077 4077 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
4078 4078 int nsid = NVME_MINOR_NSID(minor);
4079 4079 nvme_minor_state_t *nm;
4080 4080
4081 4081 if (otyp != OTYP_CHR)
4082 4082 return (ENXIO);
4083 4083
4084 4084 if (nvme == NULL)
4085 4085 return (ENXIO);
4086 4086
4087 4087 if (nsid > nvme->n_namespace_count)
4088 4088 return (ENXIO);
4089 4089
4090 4090 nm = nsid == 0 ? &nvme->n_minor : &nvme->n_ns[nsid - 1].ns_minor;
4091 4091
4092 4092 mutex_enter(&nm->nm_mutex);
4093 4093 if (nm->nm_oexcl)
4094 4094 nm->nm_oexcl = B_FALSE;
4095 4095
4096 4096 ASSERT(nm->nm_ocnt > 0);
4097 4097 nm->nm_ocnt--;
4098 4098 mutex_exit(&nm->nm_mutex);
4099 4099
4100 4100 return (0);
4101 4101 }
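
Both nvme_open() and nvme_close() recover the controller instance and namespace ID from the minor number via NVME_MINOR_INST() and NVME_MINOR_NSID(), with nsid == 0 denoting the devctl node. A hedged sketch of such an encoding (the 9-bit split is an assumption for illustration; the authoritative macros live in the driver's header):

    #include <stdint.h>

    #define SKETCH_INST_SHIFT       9
    #define SKETCH_NSID_MASK        ((1U << SKETCH_INST_SHIFT) - 1)

    /* Pack an instance and namespace ID into one minor number. */
    static uint32_t
    sketch_minor(uint32_t inst, uint32_t nsid)
    {
            return ((inst << SKETCH_INST_SHIFT) | nsid);
    }

    static uint32_t
    sketch_minor_inst(uint32_t minor)
    {
            return (minor >> SKETCH_INST_SHIFT);
    }

    static uint32_t
    sketch_minor_nsid(uint32_t minor)
    {
            return (minor & SKETCH_NSID_MASK);
    }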
4102 4102
4103 4103 static int
4104 4104 nvme_ioctl_identify(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4105 4105 cred_t *cred_p)
4106 4106 {
4107 4107 _NOTE(ARGUNUSED(cred_p));
4108 4108 int rv = 0;
4109 4109 void *idctl;
4110 4110
4111 4111 if ((mode & FREAD) == 0)
4112 4112 return (EPERM);
4113 4113
4114 4114 if (nioc->n_len < NVME_IDENTIFY_BUFSIZE)
4115 4115 return (EINVAL);
4116 4116
4117 4117 if ((rv = nvme_identify(nvme, B_TRUE, nsid, (void **)&idctl)) != 0)
4118 4118 return (rv);
4119 4119
4120 4120 if (ddi_copyout(idctl, (void *)nioc->n_buf, NVME_IDENTIFY_BUFSIZE, mode)
4121 4121 != 0)
4122 4122 rv = EFAULT;
4123 4123
4124 4124 kmem_free(idctl, NVME_IDENTIFY_BUFSIZE);
4125 4125
4126 4126 return (rv);
4127 4127 }
4128 4128
4129 4129 /*
4130 4130 * Execute commands on behalf of the various ioctls.
4131 4131 */
4132 4132 static int
4133 4133 nvme_ioc_cmd(nvme_t *nvme, nvme_sqe_t *sqe, boolean_t is_admin, void *data_addr,
4134 4134 uint32_t data_len, int rwk, nvme_cqe_t *cqe, uint_t timeout)
4135 4135 {
4136 4136 nvme_cmd_t *cmd;
4137 4137 nvme_qpair_t *ioq;
4138 4138 int rv = 0;
4139 4139
4140 4140 cmd = nvme_alloc_cmd(nvme, KM_SLEEP);
4141 4141 if (is_admin) {
4142 4142 cmd->nc_sqid = 0;
4143 4143 ioq = nvme->n_adminq;
4144 4144 } else {
4145 4145 cmd->nc_sqid = (CPU->cpu_id % nvme->n_ioq_count) + 1;
4146 4146 ASSERT(cmd->nc_sqid <= nvme->n_ioq_count);
4147 4147 ioq = nvme->n_ioq[cmd->nc_sqid];
4148 4148 }
4149 4149
4150 4150 cmd->nc_callback = nvme_wakeup_cmd;
4151 4151 cmd->nc_sqe = *sqe;
4152 4152
4153 4153 if ((rwk & (FREAD | FWRITE)) != 0) {
4154 4154 if (data_addr == NULL) {
4155 4155 rv = EINVAL;
4156 4156 goto free_cmd;
4157 4157 }
4158 4158
4159 4159 /*
4160 4160 * Because we use PRPs and haven't implemented PRP
4161 4161 * lists here, the maximum data size is restricted to
4162 4162 * 2 pages.
4163 4163 */
4164 4164 if (data_len > 2 * nvme->n_pagesize) {
4165 4165 dev_err(nvme->n_dip, CE_WARN, "!Data size %u is too "
4166 4166 "large for nvme_ioc_cmd(). Limit is 2 pages "
4167 4167 "(%u bytes)", data_len, 2 * nvme->n_pagesize);
4168 4168
4169 4169 rv = EINVAL;
4170 4170 goto free_cmd;
4171 4171 }
4172 4172
4173 4173 if (nvme_zalloc_dma(nvme, data_len, DDI_DMA_READ,
4174 4174 &nvme->n_prp_dma_attr, &cmd->nc_dma) != DDI_SUCCESS) {
4175 4175 dev_err(nvme->n_dip, CE_WARN,
4176 4176 "!nvme_zalloc_dma failed for nvme_ioc_cmd()");
4177 4177
4178 4178 rv = ENOMEM;
4179 4179 goto free_cmd;
4180 4180 }
4181 4181
4182 4182 if (cmd->nc_dma->nd_ncookie > 2) {
4183 4183 dev_err(nvme->n_dip, CE_WARN,
4184 4184 "!too many DMA cookies for nvme_ioc_cmd()");
4185 4185 atomic_inc_32(&nvme->n_too_many_cookies);
4186 4186
4187 4187 rv = E2BIG;
4188 4188 goto free_cmd;
4189 4189 }
4190 4190
4191 4191 cmd->nc_sqe.sqe_dptr.d_prp[0] =
4192 4192 cmd->nc_dma->nd_cookie.dmac_laddress;
4193 4193
4194 4194 if (cmd->nc_dma->nd_ncookie > 1) {
4195 4195 ddi_dma_nextcookie(cmd->nc_dma->nd_dmah,
4196 4196 &cmd->nc_dma->nd_cookie);
4197 4197 cmd->nc_sqe.sqe_dptr.d_prp[1] =
4198 4198 cmd->nc_dma->nd_cookie.dmac_laddress;
4199 4199 }
4200 4200
4201 4201 if ((rwk & FWRITE) != 0) {
4202 4202 if (ddi_copyin(data_addr, cmd->nc_dma->nd_memp,
4203 4203 data_len, rwk & FKIOCTL) != 0) {
4204 4204 rv = EFAULT;
4205 4205 goto free_cmd;
4206 4206 }
4207 4207 }
4208 4208 }
4209 4209
4210 4210 if (is_admin) {
4211 4211 nvme_admin_cmd(cmd, timeout);
4212 4212 } else {
4213 4213 mutex_enter(&cmd->nc_mutex);
4214 4214
4215 4215 rv = nvme_submit_io_cmd(ioq, cmd);
4216 4216
4217 4217 if (rv == EAGAIN) {
4218 4218 mutex_exit(&cmd->nc_mutex);
4219 4219 dev_err(cmd->nc_nvme->n_dip, CE_WARN,
4220 4220 "!nvme_ioc_cmd() failed, I/O Q full");
4221 4221 goto free_cmd;
4222 4222 }
4223 4223
4224 4224 nvme_wait_cmd(cmd, timeout);
4225 4225
4226 4226 mutex_exit(&cmd->nc_mutex);
4227 4227 }
4228 4228
4229 4229 if (cqe != NULL)
4230 4230 *cqe = cmd->nc_cqe;
4231 4231
4232 4232 if ((rv = nvme_check_cmd_status(cmd)) != 0) {
4233 4233 dev_err(nvme->n_dip, CE_WARN,
4234 4234 "!nvme_ioc_cmd() failed with sct = %x, sc = %x",
4235 4235 cmd->nc_cqe.cqe_sf.sf_sct, cmd->nc_cqe.cqe_sf.sf_sc);
4236 4236
4237 4237 goto free_cmd;
4238 4238 }
4239 4239
4240 4240 if ((rwk & FREAD) != 0) {
4241 4241 if (ddi_copyout(cmd->nc_dma->nd_memp,
4242 4242 data_addr, data_len, rwk & FKIOCTL) != 0)
4243 4243 rv = EFAULT;
4244 4244 }
4245 4245
4246 4246 free_cmd:
4247 4247 nvme_free_cmd(cmd);
4248 4248
4249 4249 return (rv);
4250 4250 }
4251 4251
4252 4252 static int
4253 4253 nvme_ioctl_capabilities(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4254 4254 int mode, cred_t *cred_p)
4255 4255 {
4256 4256 _NOTE(ARGUNUSED(nsid, cred_p));
4257 4257 int rv = 0;
4258 4258 nvme_reg_cap_t cap = { 0 };
4259 4259 nvme_capabilities_t nc;
4260 4260
4261 4261 if ((mode & FREAD) == 0)
4262 4262 return (EPERM);
4263 4263
4264 4264 if (nioc->n_len < sizeof (nc))
4265 4265 return (EINVAL);
4266 4266
4267 4267 cap.r = nvme_get64(nvme, NVME_REG_CAP);
4268 4268
4269 4269 /*
4270 4270 * The MPSMIN and MPSMAX fields in the CAP register use 0 to
4271 4271 * specify the base page size of 4k (1<<12), so add 12 here to
4272 4272 * get the real page size value.
4273 4273 */
4274 4274 nc.mpsmax = 1 << (12 + cap.b.cap_mpsmax);
4275 4275 nc.mpsmin = 1 << (12 + cap.b.cap_mpsmin);
4276 4276
4277 4277 if (ddi_copyout(&nc, (void *)nioc->n_buf, sizeof (nc), mode) != 0)
4278 4278 rv = EFAULT;
4279 4279
4280 4280 return (rv);
4281 4281 }
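
For instance, cap_mpsmin == 0 yields a 4k minimum page and cap_mpsmax == 4 yields a 64k maximum. The conversion, as a standalone sketch (hypothetical helper matching the shift used above):

    #include <stdint.h>

    /* CAP.MPSMIN/MPSMAX encode page sizes as 2^(12 + field) bytes. */
    static uint64_t
    mps_to_bytes(uint8_t mps_field)
    {
            return (1ULL << (12 + mps_field));      /* 0 -> 4k, 4 -> 64k */
    }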
4282 4282
4283 4283 static int
4284 4284 nvme_ioctl_get_logpage(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4285 4285 int mode, cred_t *cred_p)
4286 4286 {
4287 4287 _NOTE(ARGUNUSED(cred_p));
4288 4288 void *log = NULL;
4289 4289 size_t bufsize = 0;
4290 4290 int rv = 0;
4291 4291
4292 4292 if ((mode & FREAD) == 0)
4293 4293 return (EPERM);
4294 4294
4295 4295 switch (nioc->n_arg) {
4296 4296 case NVME_LOGPAGE_ERROR:
4297 4297 if (nsid != 0)
4298 4298 return (EINVAL);
4299 4299 break;
4300 4300 case NVME_LOGPAGE_HEALTH:
4301 4301 if (nsid != 0 && nvme->n_idctl->id_lpa.lp_smart == 0)
4302 4302 return (EINVAL);
4303 4303
4304 4304 if (nsid == 0)
4305 4305 nsid = (uint32_t)-1;
4306 4306
4307 4307 break;
4308 4308 case NVME_LOGPAGE_FWSLOT:
4309 4309 if (nsid != 0)
4310 4310 return (EINVAL);
4311 4311 break;
4312 4312 default:
4313 4313 return (EINVAL);
4314 4314 }
4315 4315
4316 4316 if (nvme_get_logpage(nvme, B_TRUE, &log, &bufsize, nioc->n_arg, nsid)
4317 4317 != DDI_SUCCESS)
4318 4318 return (EIO);
4319 4319
4320 4320 if (nioc->n_len < bufsize) {
4321 4321 kmem_free(log, bufsize);
4322 4322 return (EINVAL);
4323 4323 }
4324 4324
4325 4325 if (ddi_copyout(log, (void *)nioc->n_buf, bufsize, mode) != 0)
4326 4326 rv = EFAULT;
4327 4327
4328 4328 nioc->n_len = bufsize;
4329 4329 kmem_free(log, bufsize);
4330 4330
4331 4331 return (rv);
4332 4332 }
4333 4333
4334 4334 static int
4335 4335 nvme_ioctl_get_features(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4336 4336 int mode, cred_t *cred_p)
4337 4337 {
4338 4338 _NOTE(ARGUNUSED(cred_p));
4339 4339 void *buf = NULL;
4340 4340 size_t bufsize = 0;
4341 4341 uint32_t res = 0;
4342 4342 uint8_t feature;
4343 4343 int rv = 0;
4344 4344
4345 4345 if ((mode & FREAD) == 0)
4346 4346 return (EPERM);
4347 4347
4348 4348 if ((nioc->n_arg >> 32) > 0xff)
4349 4349 return (EINVAL);
4350 4350
4351 4351 feature = (uint8_t)(nioc->n_arg >> 32);
4352 4352
4353 4353 switch (feature) {
4354 4354 case NVME_FEAT_ARBITRATION:
4355 4355 case NVME_FEAT_POWER_MGMT:
4356 4356 case NVME_FEAT_ERROR:
4357 4357 case NVME_FEAT_NQUEUES:
4358 4358 case NVME_FEAT_INTR_COAL:
4359 4359 case NVME_FEAT_WRITE_ATOM:
4360 4360 case NVME_FEAT_ASYNC_EVENT:
4361 4361 case NVME_FEAT_PROGRESS:
4362 4362 if (nsid != 0)
4363 4363 return (EINVAL);
4364 4364 break;
4365 4365
4366 4366 case NVME_FEAT_TEMPERATURE:
4367 4367 if (nsid != 0)
4368 4368 return (EINVAL);
4369 4369 res = nioc->n_arg & 0xffffffffUL;
4370 4370 if (NVME_VERSION_ATLEAST(&nvme->n_version, 1, 2)) {
4371 4371 nvme_temp_threshold_t tt;
4372 4372
4373 4373 tt.r = res;
4374 4374 if (tt.b.tt_thsel != NVME_TEMP_THRESH_OVER &&
4375 4375 tt.b.tt_thsel != NVME_TEMP_THRESH_UNDER) {
4376 4376 return (EINVAL);
4377 4377 }
4378 4378
4379 4379 if (tt.b.tt_tmpsel > NVME_TEMP_THRESH_MAX_SENSOR) {
4380 4380 return (EINVAL);
4381 4381 }
4382 4382 } else if (res != 0) {
4383 4383 return (EINVAL);
4384 4384 }
4385 4385 break;
4386 4386
4387 4387 case NVME_FEAT_INTR_VECT:
4388 4388 if (nsid != 0)
4389 4389 return (EINVAL);
4390 4390
4391 4391 res = nioc->n_arg & 0xffffffffUL;
4392 4392 if (res >= nvme->n_intr_cnt)
4393 4393 return (EINVAL);
4394 4394 break;
4395 4395
4396 4396 case NVME_FEAT_LBA_RANGE:
4397 4397 if (nvme->n_lba_range_supported == B_FALSE)
4398 4398 return (EINVAL);
4399 4399
4400 4400 if (nsid == 0 ||
4401 4401 nsid > nvme->n_namespace_count)
4402 4402 return (EINVAL);
4403 4403
4404 4404 break;
4405 4405
4406 4406 case NVME_FEAT_WRITE_CACHE:
4407 4407 if (nsid != 0)
4408 4408 return (EINVAL);
4409 4409
4410 4410 if (!nvme->n_write_cache_present)
4411 4411 return (EINVAL);
4412 4412
4413 4413 break;
4414 4414
4415 4415 case NVME_FEAT_AUTO_PST:
4416 4416 if (nsid != 0)
4417 4417 return (EINVAL);
4418 4418
4419 4419 if (!nvme->n_auto_pst_supported)
4420 4420 return (EINVAL);
4421 4421
4422 4422 break;
4423 4423
4424 4424 default:
4425 4425 return (EINVAL);
4426 4426 }
4427 4427
4428 4428 rv = nvme_get_features(nvme, B_TRUE, nsid, feature, &res, &buf,
4429 4429 &bufsize);
4430 4430 if (rv != 0)
4431 4431 return (rv);
4432 4432
4433 4433 if (nioc->n_len < bufsize) {
4434 4434 kmem_free(buf, bufsize);
4435 4435 return (EINVAL);
4436 4436 }
4437 4437
4438 4438 if (buf && ddi_copyout(buf, (void*)nioc->n_buf, bufsize, mode) != 0)
4439 4439 rv = EFAULT;
4440 4440
4441 4441 kmem_free(buf, bufsize);
4442 4442 nioc->n_arg = res;
4443 4443 nioc->n_len = bufsize;
4444 4444
4445 4445 return (rv);
4446 4446 }
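
The validation above relies on the ioctl convention that the feature identifier travels in the upper 32 bits of n_arg while any feature-specific dword (such as a temperature threshold) travels in the lower 32. A sketch of how a caller would pack the argument (hypothetical helper, assuming only the bit layout used above):

    #include <stdint.h>

    /* Feature id in bits 63:32, feature-specific dword in bits 31:0. */
    static uint64_t
    pack_get_features_arg(uint8_t feature, uint32_t arg)
    {
            return (((uint64_t)feature << 32) | arg);
    }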
4447 4447
4448 4448 static int
4449 4449 nvme_ioctl_intr_cnt(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4450 4450 cred_t *cred_p)
4451 4451 {
4452 4452 _NOTE(ARGUNUSED(nsid, mode, cred_p));
4453 4453
4454 4454 if ((mode & FREAD) == 0)
4455 4455 return (EPERM);
4456 4456
4457 4457 nioc->n_arg = nvme->n_intr_cnt;
4458 4458 return (0);
4459 4459 }
4460 4460
4461 4461 static int
4462 4462 nvme_ioctl_version(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4463 4463 cred_t *cred_p)
4464 4464 {
4465 4465 _NOTE(ARGUNUSED(nsid, cred_p));
4466 4466 int rv = 0;
4467 4467
4468 4468 if ((mode & FREAD) == 0)
4469 4469 return (EPERM);
4470 4470
4471 4471 if (nioc->n_len < sizeof (nvme->n_version))
4472 4472 return (ENOMEM);
4473 4473
4474 4474 if (ddi_copyout(&nvme->n_version, (void *)nioc->n_buf,
4475 4475 sizeof (nvme->n_version), mode) != 0)
4476 4476 rv = EFAULT;
4477 4477
4478 4478 return (rv);
4479 4479 }
4480 4480
4481 4481 static int
4482 4482 nvme_ioctl_format(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4483 4483 cred_t *cred_p)
4484 4484 {
4485 4485 _NOTE(ARGUNUSED(mode));
4486 4486 nvme_format_nvm_t frmt = { 0 };
4487 4487 int c_nsid = nsid != 0 ? nsid - 1 : 0;
4488 4488
4489 4489 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
4490 4490 return (EPERM);
4491 4491
4492 4492 frmt.r = nioc->n_arg & 0xffffffff;
4493 4493
4494 4494 /*
4495 4495 * Check whether the FORMAT NVM command is supported.
4496 4496 */
4497 4497 if (nvme->n_idctl->id_oacs.oa_format == 0)
4498 4498 return (EINVAL);
4499 4499
4500 4500 /*
4501 4501 * Don't allow format or secure erase of individual namespace if that
4502 4502 * would cause a format or secure erase of all namespaces.
4503 4503 */
4504 4504 if (nsid != 0 && nvme->n_idctl->id_fna.fn_format != 0)
4505 4505 return (EINVAL);
4506 4506
4507 4507 if (nsid != 0 && frmt.b.fm_ses != NVME_FRMT_SES_NONE &&
4508 4508 nvme->n_idctl->id_fna.fn_sec_erase != 0)
4509 4509 return (EINVAL);
4510 4510
4511 4511 /*
4512 4512 * Don't allow formatting with Protection Information.
4513 4513 */
4514 4514 if (frmt.b.fm_pi != 0 || frmt.b.fm_pil != 0 || frmt.b.fm_ms != 0)
4515 4515 return (EINVAL);
4516 4516
4517 4517 /*
4518 4518 * Don't allow formatting using an illegal LBA format, or any LBA format
4519 4519 * that uses metadata.
4520 4520 */
4521 4521 if (frmt.b.fm_lbaf > nvme->n_ns[c_nsid].ns_idns->id_nlbaf ||
4522 4522 nvme->n_ns[c_nsid].ns_idns->id_lbaf[frmt.b.fm_lbaf].lbaf_ms != 0)
4523 4523 return (EINVAL);
4524 4524
4525 4525 /*
4526 4526 * Don't allow formatting using an illegal Secure Erase setting.
4527 4527 */
4528 4528 if (frmt.b.fm_ses > NVME_FRMT_MAX_SES ||
4529 4529 (frmt.b.fm_ses == NVME_FRMT_SES_CRYPTO &&
4530 4530 nvme->n_idctl->id_fna.fn_crypt_erase == 0))
4531 4531 return (EINVAL);
4532 4532
4533 4533 if (nsid == 0)
4534 4534 nsid = (uint32_t)-1;
4535 4535
4536 4536 return (nvme_format_nvm(nvme, B_TRUE, nsid, frmt.b.fm_lbaf, B_FALSE, 0,
4537 4537 B_FALSE, frmt.b.fm_ses));
4538 4538 }
4539 4539
4540 4540 static int
4541 4541 nvme_ioctl_detach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4542 4542 cred_t *cred_p)
4543 4543 {
4544 4544 _NOTE(ARGUNUSED(nioc, mode));
4545 4545 int rv = 0;
4546 4546
4547 4547 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
4548 4548 return (EPERM);
4549 4549
4550 4550 if (nsid == 0)
4551 4551 return (EINVAL);
4552 4552
4553 + if (nvme->n_ns[nsid - 1].ns_ignore)
4554 + return (0);
4555 +
4553 4556 rv = bd_detach_handle(nvme->n_ns[nsid - 1].ns_bd_hdl);
4554 4557 if (rv != DDI_SUCCESS)
4555 4558 rv = EBUSY;
4556 4559
4557 4560 return (rv);
4558 4561 }
4559 4562
4560 4563 static int
4561 4564 nvme_ioctl_attach(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc, int mode,
4562 4565 cred_t *cred_p)
4563 4566 {
4564 4567 _NOTE(ARGUNUSED(nioc, mode));
4565 4568 nvme_identify_nsid_t *idns;
4566 4569 int rv = 0;
4567 4570
4568 4571 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
4569 4572 return (EPERM);
4570 4573
4571 4574 if (nsid == 0)
4572 4575 return (EINVAL);
4573 4576
4574 4577 /*
4575 4578 * Identify namespace again, free old identify data.
4576 4579 */
4577 4580 idns = nvme->n_ns[nsid - 1].ns_idns;
4578 4581 if (nvme_init_ns(nvme, nsid) != DDI_SUCCESS)
4579 4582 return (EIO);
4580 4583
4581 4584 kmem_free(idns, sizeof (nvme_identify_nsid_t));
4582 4585
4586 + if (nvme->n_ns[nsid - 1].ns_ignore)
4587 + return (ENOTSUP);
4588 +
4589 + if (nvme->n_ns[nsid - 1].ns_bd_hdl == NULL)
4590 + nvme->n_ns[nsid - 1].ns_bd_hdl = bd_alloc_handle(
4591 + &nvme->n_ns[nsid - 1], &nvme_bd_ops, &nvme->n_prp_dma_attr,
4592 + KM_SLEEP);
4593 +
4583 4594 rv = bd_attach_handle(nvme->n_dip, nvme->n_ns[nsid - 1].ns_bd_hdl);
4584 4595 if (rv != DDI_SUCCESS)
4585 4596 rv = EBUSY;
4586 4597
4587 4598 return (rv);
4588 4599 }
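
This guard is the heart of the fix for 11969: previously an attach request for an ignored namespace would hand a NULL ns_bd_hdl to bd_attach_handle() and panic. The ordering matters, since the handle must never be allocated for an ignored namespace; condensed as a hedged standalone sketch (the helper is hypothetical):

    #include <errno.h>
    #include <stddef.h>

    /*
     * Refuse ignored namespaces outright, then lazily allocate the
     * blkdev handle for a namespace that was detached earlier.
     */
    static int
    attach_guard(int ns_ignore, void **bd_hdl_p)
    {
            if (ns_ignore)
                    return (ENOTSUP);       /* never reaches blkdev */

            if (*bd_hdl_p == NULL) {
                    /* allocate the handle, as bd_alloc_handle() above */
            }

            return (0);
    }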
4589 4600
4590 4601 static void
4591 4602 nvme_ufm_update(nvme_t *nvme)
4592 4603 {
4593 4604 mutex_enter(&nvme->n_fwslot_mutex);
4594 4605 ddi_ufm_update(nvme->n_ufmh);
4595 4606 if (nvme->n_fwslot != NULL) {
4596 4607 kmem_free(nvme->n_fwslot, sizeof (nvme_fwslot_log_t));
4597 4608 nvme->n_fwslot = NULL;
4598 4609 }
4599 4610 mutex_exit(&nvme->n_fwslot_mutex);
4600 4611 }
4601 4612
4602 4613 static int
4603 4614 nvme_ioctl_firmware_download(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4604 4615 int mode, cred_t *cred_p)
4605 4616 {
4606 4617 int rv = 0;
4607 4618 size_t len, copylen;
4608 4619 offset_t offset;
4609 4620 uintptr_t buf;
4610 4621 nvme_sqe_t sqe = {
4611 4622 .sqe_opc = NVME_OPC_FW_IMAGE_LOAD
4612 4623 };
4613 4624
4614 4625 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
4615 4626 return (EPERM);
4616 4627
4617 4628 if (nsid != 0)
4618 4629 return (EINVAL);
4619 4630
4620 4631 /*
4621 4632 	 * The offset is a 32-bit DWORD count, so restrict the length
4622 4633 	 * (n_len) to what such an offset can address.
4623 4634 */
4624 4635 if (nioc->n_len > NVME_FW_OFFSETB_MAX)
4625 4636 return (EINVAL);
4626 4637
4627 4638 /* Confirm that both offset and length are a multiple of DWORD bytes */
4628 4639 if ((nioc->n_len & NVME_DWORD_MASK) != 0 ||
4629 4640 (nioc->n_arg & NVME_DWORD_MASK) != 0)
4630 4641 return (EINVAL);
4631 4642
4632 4643 len = nioc->n_len;
4633 4644 offset = nioc->n_arg;
4634 4645 buf = (uintptr_t)nioc->n_buf;
4635 4646 while (len > 0 && rv == 0) {
4636 4647 /*
4637 4648 * nvme_ioc_cmd() does not use SGLs or PRP lists.
4638 4649 * It is limited to 2 PRPs per NVM command, so limit
4639 4650 * the size of the data to 2 pages.
4640 4651 */
4641 4652 copylen = MIN(2 * nvme->n_pagesize, len);
4642 4653
4643 4654 sqe.sqe_cdw10 = (uint32_t)(copylen >> NVME_DWORD_SHIFT) - 1;
4644 4655 sqe.sqe_cdw11 = (uint32_t)(offset >> NVME_DWORD_SHIFT);
4645 4656
4646 4657 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, (void *)buf, copylen,
4647 4658 FWRITE, NULL, nvme_admin_cmd_timeout);
4648 4659
4649 4660 buf += copylen;
4650 4661 offset += copylen;
4651 4662 len -= copylen;
4652 4663 }
4653 4664
4654 4665 /*
4655 4666 * Let the DDI UFM subsystem know that the firmware information for
4656 4667 * this device has changed.
4657 4668 */
4658 4669 nvme_ufm_update(nvme);
4659 4670
4660 4671 return (rv);
4661 4672 }
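
Both cdw10 and cdw11 above are expressed in dwords: cdw10 carries the zero-based dword count of this chunk and cdw11 the dword offset into the image. For an 8k chunk at byte offset 16384 that is cdw10 == 2047 and cdw11 == 4096. A standalone sketch of the conversion (hypothetical helpers; the shift of 2 mirrors NVME_DWORD_SHIFT):

    #include <stddef.h>
    #include <stdint.h>

    #define SKETCH_DWORD_SHIFT      2       /* a dword is 4 bytes */

    /* Zero-based dword count for a chunk of copylen bytes. */
    static uint32_t
    fw_numd(size_t copylen)
    {
            return ((uint32_t)(copylen >> SKETCH_DWORD_SHIFT) - 1);
    }

    /* Dword offset for a byte offset into the firmware image. */
    static uint32_t
    fw_offsetd(uint64_t offset)
    {
            return ((uint32_t)(offset >> SKETCH_DWORD_SHIFT));
    }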
4662 4673
4663 4674 static int
4664 4675 nvme_ioctl_firmware_commit(nvme_t *nvme, int nsid, nvme_ioctl_t *nioc,
4665 4676 int mode, cred_t *cred_p)
4666 4677 {
4667 4678 nvme_firmware_commit_dw10_t fc_dw10 = { 0 };
4668 4679 uint32_t slot = nioc->n_arg & 0xffffffff;
4669 4680 uint32_t action = nioc->n_arg >> 32;
4670 4681 nvme_cqe_t cqe = { 0 };
4671 4682 nvme_sqe_t sqe = {
4672 4683 .sqe_opc = NVME_OPC_FW_ACTIVATE
4673 4684 };
4674 4685 int timeout;
4675 4686 int rv;
4676 4687
4677 4688 if ((mode & FWRITE) == 0 || secpolicy_sys_config(cred_p, B_FALSE) != 0)
4678 4689 return (EPERM);
4679 4690
4680 4691 if (nsid != 0)
4681 4692 return (EINVAL);
4682 4693
4683 4694 /* Validate slot is in range. */
4684 4695 if (slot < NVME_FW_SLOT_MIN || slot > NVME_FW_SLOT_MAX)
4685 4696 return (EINVAL);
4686 4697
4687 4698 switch (action) {
4688 4699 case NVME_FWC_SAVE:
4689 4700 case NVME_FWC_SAVE_ACTIVATE:
4690 4701 timeout = nvme_commit_save_cmd_timeout;
4691 4702 break;
4692 4703 case NVME_FWC_ACTIVATE:
4693 4704 case NVME_FWC_ACTIVATE_IMMED:
4694 4705 timeout = nvme_admin_cmd_timeout;
4695 4706 break;
4696 4707 default:
4697 4708 return (EINVAL);
4698 4709 }
4699 4710
4700 4711 fc_dw10.b.fc_slot = slot;
4701 4712 fc_dw10.b.fc_action = action;
4702 4713 sqe.sqe_cdw10 = fc_dw10.r;
4703 4714
4704 4715 rv = nvme_ioc_cmd(nvme, &sqe, B_TRUE, NULL, 0, 0, &cqe, timeout);
4705 4716
4706 4717 nioc->n_arg = ((uint64_t)cqe.cqe_sf.sf_sct << 16) | cqe.cqe_sf.sf_sc;
4707 4718
4708 4719 /*
4709 4720 * Let the DDI UFM subsystem know that the firmware information for
4710 4721 * this device has changed.
4711 4722 */
4712 4723 nvme_ufm_update(nvme);
4713 4724
4714 4725 return (rv);
4715 4726 }
4716 4727
4717 4728 static int
4718 4729 nvme_ioctl(dev_t dev, int cmd, intptr_t arg, int mode, cred_t *cred_p,
4719 4730 int *rval_p)
4720 4731 {
4721 4732 #ifndef __lock_lint
4722 4733 _NOTE(ARGUNUSED(rval_p));
4723 4734 #endif
4724 4735 minor_t minor = getminor(dev);
4725 4736 nvme_t *nvme = ddi_get_soft_state(nvme_state, NVME_MINOR_INST(minor));
4726 4737 int nsid = NVME_MINOR_NSID(minor);
4727 4738 int rv = 0;
4728 4739 nvme_ioctl_t nioc;
4729 4740
4730 4741 int (*nvme_ioctl[])(nvme_t *, int, nvme_ioctl_t *, int, cred_t *) = {
4731 4742 NULL,
4732 4743 nvme_ioctl_identify,
4733 4744 nvme_ioctl_identify,
4734 4745 nvme_ioctl_capabilities,
4735 4746 nvme_ioctl_get_logpage,
4736 4747 nvme_ioctl_get_features,
4737 4748 nvme_ioctl_intr_cnt,
4738 4749 nvme_ioctl_version,
4739 4750 nvme_ioctl_format,
4740 4751 nvme_ioctl_detach,
4741 4752 nvme_ioctl_attach,
4742 4753 nvme_ioctl_firmware_download,
4743 4754 nvme_ioctl_firmware_commit
4744 4755 };
4745 4756
4746 4757 if (nvme == NULL)
4747 4758 return (ENXIO);
4748 4759
4749 4760 if (nsid > nvme->n_namespace_count)
4750 4761 return (ENXIO);
4751 4762
4752 4763 if (IS_DEVCTL(cmd))
4753 4764 return (ndi_devctl_ioctl(nvme->n_dip, cmd, arg, mode, 0));
4754 4765
4755 4766 #ifdef _MULTI_DATAMODEL
4756 4767 switch (ddi_model_convert_from(mode & FMODELS)) {
4757 4768 case DDI_MODEL_ILP32: {
4758 4769 nvme_ioctl32_t nioc32;
4759 4770 if (ddi_copyin((void*)arg, &nioc32, sizeof (nvme_ioctl32_t),
4760 4771 mode) != 0)
4761 4772 return (EFAULT);
4762 4773 nioc.n_len = nioc32.n_len;
4763 4774 nioc.n_buf = nioc32.n_buf;
4764 4775 nioc.n_arg = nioc32.n_arg;
4765 4776 break;
4766 4777 }
4767 4778 case DDI_MODEL_NONE:
4768 4779 #endif
4769 4780 if (ddi_copyin((void*)arg, &nioc, sizeof (nvme_ioctl_t), mode)
4770 4781 != 0)
4771 4782 return (EFAULT);
4772 4783 #ifdef _MULTI_DATAMODEL
4773 4784 break;
4774 4785 }
4775 4786 #endif
4776 4787
4777 4788 if (nvme->n_dead && cmd != NVME_IOC_DETACH)
4778 4789 return (EIO);
4779 4790
4780 4791
4781 4792 if (cmd == NVME_IOC_IDENTIFY_CTRL) {
4782 4793 /*
4783 4794 * This makes NVME_IOC_IDENTIFY_CTRL work the same on devctl and
4784 4795 * attachment point nodes.
4785 4796 */
4786 4797 nsid = 0;
4787 4798 } else if (cmd == NVME_IOC_IDENTIFY_NSID && nsid == 0) {
4788 4799 /*
4789 4800 * This makes NVME_IOC_IDENTIFY_NSID work on a devctl node, it
4790 4801 * will always return identify data for namespace 1.
4791 4802 */
4792 4803 nsid = 1;
4793 4804 }
4794 4805
4795 4806 if (IS_NVME_IOC(cmd) && nvme_ioctl[NVME_IOC_CMD(cmd)] != NULL)
4796 4807 rv = nvme_ioctl[NVME_IOC_CMD(cmd)](nvme, nsid, &nioc, mode,
4797 4808 cred_p);
4798 4809 else
4799 4810 rv = EINVAL;
4800 4811
4801 4812 #ifdef _MULTI_DATAMODEL
4802 4813 switch (ddi_model_convert_from(mode & FMODELS)) {
4803 4814 case DDI_MODEL_ILP32: {
4804 4815 nvme_ioctl32_t nioc32;
4805 4816
4806 4817 nioc32.n_len = (size32_t)nioc.n_len;
4807 4818 nioc32.n_buf = (uintptr32_t)nioc.n_buf;
4808 4819 nioc32.n_arg = nioc.n_arg;
4809 4820
4810 4821 if (ddi_copyout(&nioc32, (void *)arg, sizeof (nvme_ioctl32_t),
4811 4822 mode) != 0)
4812 4823 return (EFAULT);
4813 4824 break;
4814 4825 }
4815 4826 case DDI_MODEL_NONE:
4816 4827 #endif
4817 4828 if (ddi_copyout(&nioc, (void *)arg, sizeof (nvme_ioctl_t), mode)
4818 4829 != 0)
4819 4830 return (EFAULT);
4820 4831 #ifdef _MULTI_DATAMODEL
4821 4832 break;
4822 4833 }
4823 4834 #endif
4824 4835
4825 4836 return (rv);
4826 4837 }
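
The dispatch above indexes the handler table with NVME_IOC_CMD(cmd), so the low bits of the ioctl number select the slot; slot 0 is deliberately NULL, and anything out of range or unassigned falls through to EINVAL. A hedged sketch of that selection (the 0xff mask is an assumption, not the driver's actual definition):

    #define SKETCH_IOC_CMD(x)       ((x) & 0xff)    /* table index */

    /* A command maps to a handler only if its index is in range. */
    static int
    ioc_index_valid(int cmd, int nhandlers)
    {
            int idx = SKETCH_IOC_CMD(cmd);

            return (idx > 0 && idx < nhandlers);
    }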
4827 4838
4828 4839 /*
4829 4840 * DDI UFM Callbacks
4830 4841 */
4831 4842 static int
4832 4843 nvme_ufm_fill_image(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
4833 4844 ddi_ufm_image_t *img)
4834 4845 {
4835 4846 nvme_t *nvme = arg;
4836 4847
4837 4848 if (imgno != 0)
4838 4849 return (EINVAL);
4839 4850
4840 4851 ddi_ufm_image_set_desc(img, "Firmware");
4841 4852 ddi_ufm_image_set_nslots(img, nvme->n_idctl->id_frmw.fw_nslot);
4842 4853
4843 4854 return (0);
4844 4855 }
4845 4856
4846 4857 /*
4847 4858 * Fill out firmware slot information for the requested slot. The firmware
4848 4859 * slot information is gathered by requesting the Firmware Slot Information log
4849 4860 * page. The format of the page is described in section 5.10.1.3.
4850 4861 *
4851 4862 * We lazily cache the log page on the first call and then invalidate the cache
4852 4863 * data after a successful firmware download or firmware commit command.
4853 4864 * The cached data is protected by a mutex as the state can change
4854 4865 	 * asynchronously to this callback.
4855 4866 */
4856 4867 static int
4857 4868 nvme_ufm_fill_slot(ddi_ufm_handle_t *ufmh, void *arg, uint_t imgno,
4858 4869 uint_t slotno, ddi_ufm_slot_t *slot)
4859 4870 {
4860 4871 nvme_t *nvme = arg;
4861 4872 void *log = NULL;
4862 4873 size_t bufsize;
4863 4874 ddi_ufm_attr_t attr = 0;
4864 4875 char fw_ver[NVME_FWVER_SZ + 1];
4865 4876 int ret;
4866 4877
4867 4878 if (imgno > 0 || slotno > (nvme->n_idctl->id_frmw.fw_nslot - 1))
4868 4879 return (EINVAL);
4869 4880
4870 4881 mutex_enter(&nvme->n_fwslot_mutex);
4871 4882 if (nvme->n_fwslot == NULL) {
4872 4883 ret = nvme_get_logpage(nvme, B_TRUE, &log, &bufsize,
4873 4884 NVME_LOGPAGE_FWSLOT, 0);
4874 4885 if (ret != DDI_SUCCESS ||
4875 4886 bufsize != sizeof (nvme_fwslot_log_t)) {
4876 4887 if (log != NULL)
4877 4888 kmem_free(log, bufsize);
4878 4889 mutex_exit(&nvme->n_fwslot_mutex);
4879 4890 return (EIO);
4880 4891 }
4881 4892 nvme->n_fwslot = (nvme_fwslot_log_t *)log;
4882 4893 }
4883 4894
4884 4895 /*
4885 4896 * NVMe numbers firmware slots starting at 1
4886 4897 */
4887 4898 if (slotno == (nvme->n_fwslot->fw_afi - 1))
4888 4899 attr |= DDI_UFM_ATTR_ACTIVE;
4889 4900
4890 4901 if (slotno != 0 || nvme->n_idctl->id_frmw.fw_readonly == 0)
4891 4902 attr |= DDI_UFM_ATTR_WRITEABLE;
4892 4903
4893 4904 if (nvme->n_fwslot->fw_frs[slotno][0] == '\0') {
4894 4905 attr |= DDI_UFM_ATTR_EMPTY;
4895 4906 } else {
4896 4907 (void) strncpy(fw_ver, nvme->n_fwslot->fw_frs[slotno],
4897 4908 NVME_FWVER_SZ);
4898 4909 fw_ver[NVME_FWVER_SZ] = '\0';
4899 4910 ddi_ufm_slot_set_version(slot, fw_ver);
4900 4911 }
4901 4912 mutex_exit(&nvme->n_fwslot_mutex);
4902 4913
4903 4914 ddi_ufm_slot_set_attrs(slot, attr);
4904 4915
4905 4916 return (0);
4906 4917 }
4907 4918
4908 4919 static int
4909 4920 nvme_ufm_getcaps(ddi_ufm_handle_t *ufmh, void *arg, ddi_ufm_cap_t *caps)
4910 4921 {
4911 4922 *caps = DDI_UFM_CAP_REPORT;
4912 4923 return (0);
4913 4924 }