2 * NVM Express device driver
3 * Copyright (c) 2011-2014, Intel Corporation.
5 * This program is free software; you can redistribute it and/or modify it
6 * under the terms and conditions of the GNU General Public License,
7 * version 2, as published by the Free Software Foundation.
9 * This program is distributed in the hope it will be useful, but WITHOUT
10 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
11 * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
15 #include <linux/blkdev.h>
16 #include <linux/blk-mq.h>
17 #include <linux/delay.h>
18 #include <linux/errno.h>
19 #include <linux/hdreg.h>
20 #include <linux/kernel.h>
21 #include <linux/module.h>
22 #include <linux/list_sort.h>
23 #include <linux/slab.h>
24 #include <linux/types.h>
26 #include <linux/ptrace.h>
27 #include <linux/nvme_ioctl.h>
28 #include <linux/t10-pi.h>
29 #include <linux/pm_qos.h>
30 #include <asm/unaligned.h>
35 #define NVME_MINORS (1U << MINORBITS)
37 unsigned char admin_timeout
= 60;
38 module_param(admin_timeout
, byte
, 0644);
39 MODULE_PARM_DESC(admin_timeout
, "timeout in seconds for admin commands");
40 EXPORT_SYMBOL_GPL(admin_timeout
);
42 unsigned char nvme_io_timeout
= 30;
43 module_param_named(io_timeout
, nvme_io_timeout
, byte
, 0644);
44 MODULE_PARM_DESC(io_timeout
, "timeout in seconds for I/O");
45 EXPORT_SYMBOL_GPL(nvme_io_timeout
);
47 static unsigned char shutdown_timeout
= 5;
48 module_param(shutdown_timeout
, byte
, 0644);
49 MODULE_PARM_DESC(shutdown_timeout
, "timeout in seconds for controller shutdown");
51 static u8 nvme_max_retries
= 5;
52 module_param_named(max_retries
, nvme_max_retries
, byte
, 0644);
53 MODULE_PARM_DESC(max_retries
, "max number of retries a command may have");
55 static int nvme_char_major
;
56 module_param(nvme_char_major
, int, 0);
58 static unsigned long default_ps_max_latency_us
= 100000;
59 module_param(default_ps_max_latency_us
, ulong
, 0644);
60 MODULE_PARM_DESC(default_ps_max_latency_us
,
61 "max power saving latency for new devices; use PM QOS to change per device");
63 static bool force_apst
;
64 module_param(force_apst
, bool, 0644);
65 MODULE_PARM_DESC(force_apst
, "allow APST for newly enumerated devices even if quirked off");
68 module_param(streams
, bool, 0644);
69 MODULE_PARM_DESC(streams
, "turn on support for Streams write directives");
71 struct workqueue_struct
*nvme_wq
;
72 EXPORT_SYMBOL_GPL(nvme_wq
);
74 static LIST_HEAD(nvme_ctrl_list
);
75 static DEFINE_SPINLOCK(dev_list_lock
);
77 static struct class *nvme_class
;
79 static __le32
nvme_get_log_dw10(u8 lid
, size_t size
)
81 return cpu_to_le32((((size
/ 4) - 1) << 16) | lid
);
84 int nvme_reset_ctrl(struct nvme_ctrl
*ctrl
)
86 if (!nvme_change_ctrl_state(ctrl
, NVME_CTRL_RESETTING
))
88 if (!queue_work(nvme_wq
, &ctrl
->reset_work
))
92 EXPORT_SYMBOL_GPL(nvme_reset_ctrl
);
94 static int nvme_reset_ctrl_sync(struct nvme_ctrl
*ctrl
)
98 ret
= nvme_reset_ctrl(ctrl
);
100 flush_work(&ctrl
->reset_work
);
104 static blk_status_t
nvme_error_status(struct request
*req
)
106 switch (nvme_req(req
)->status
& 0x7ff) {
107 case NVME_SC_SUCCESS
:
109 case NVME_SC_CAP_EXCEEDED
:
110 return BLK_STS_NOSPC
;
111 case NVME_SC_ONCS_NOT_SUPPORTED
:
112 return BLK_STS_NOTSUPP
;
113 case NVME_SC_WRITE_FAULT
:
114 case NVME_SC_READ_ERROR
:
115 case NVME_SC_UNWRITTEN_BLOCK
:
116 case NVME_SC_ACCESS_DENIED
:
117 case NVME_SC_READ_ONLY
:
118 return BLK_STS_MEDIUM
;
119 case NVME_SC_GUARD_CHECK
:
120 case NVME_SC_APPTAG_CHECK
:
121 case NVME_SC_REFTAG_CHECK
:
122 case NVME_SC_INVALID_PI
:
123 return BLK_STS_PROTECTION
;
124 case NVME_SC_RESERVATION_CONFLICT
:
125 return BLK_STS_NEXUS
;
127 return BLK_STS_IOERR
;
131 static inline bool nvme_req_needs_retry(struct request
*req
)
133 if (blk_noretry_request(req
))
135 if (nvme_req(req
)->status
& NVME_SC_DNR
)
137 if (nvme_req(req
)->retries
>= nvme_max_retries
)
142 void nvme_complete_rq(struct request
*req
)
144 if (unlikely(nvme_req(req
)->status
&& nvme_req_needs_retry(req
))) {
145 nvme_req(req
)->retries
++;
146 blk_mq_requeue_request(req
, true);
150 blk_mq_end_request(req
, nvme_error_status(req
));
152 EXPORT_SYMBOL_GPL(nvme_complete_rq
);
154 void nvme_cancel_request(struct request
*req
, void *data
, bool reserved
)
158 if (!blk_mq_request_started(req
))
161 dev_dbg_ratelimited(((struct nvme_ctrl
*) data
)->device
,
162 "Cancelling I/O %d", req
->tag
);
164 status
= NVME_SC_ABORT_REQ
;
165 if (blk_queue_dying(req
->q
))
166 status
|= NVME_SC_DNR
;
167 nvme_req(req
)->status
= status
;
168 blk_mq_complete_request(req
);
171 EXPORT_SYMBOL_GPL(nvme_cancel_request
);
173 bool nvme_change_ctrl_state(struct nvme_ctrl
*ctrl
,
174 enum nvme_ctrl_state new_state
)
176 enum nvme_ctrl_state old_state
;
178 bool changed
= false;
180 spin_lock_irqsave(&ctrl
->lock
, flags
);
182 old_state
= ctrl
->state
;
187 case NVME_CTRL_RESETTING
:
188 case NVME_CTRL_RECONNECTING
:
195 case NVME_CTRL_RESETTING
:
205 case NVME_CTRL_RECONNECTING
:
214 case NVME_CTRL_DELETING
:
217 case NVME_CTRL_RESETTING
:
218 case NVME_CTRL_RECONNECTING
:
227 case NVME_CTRL_DELETING
:
239 ctrl
->state
= new_state
;
241 spin_unlock_irqrestore(&ctrl
->lock
, flags
);
245 EXPORT_SYMBOL_GPL(nvme_change_ctrl_state
);
247 static void nvme_free_ns(struct kref
*kref
)
249 struct nvme_ns
*ns
= container_of(kref
, struct nvme_ns
, kref
);
252 nvme_nvm_unregister(ns
);
255 spin_lock(&dev_list_lock
);
256 ns
->disk
->private_data
= NULL
;
257 spin_unlock(&dev_list_lock
);
261 ida_simple_remove(&ns
->ctrl
->ns_ida
, ns
->instance
);
262 nvme_put_ctrl(ns
->ctrl
);
266 static void nvme_put_ns(struct nvme_ns
*ns
)
268 kref_put(&ns
->kref
, nvme_free_ns
);
271 static struct nvme_ns
*nvme_get_ns_from_disk(struct gendisk
*disk
)
275 spin_lock(&dev_list_lock
);
276 ns
= disk
->private_data
;
278 if (!kref_get_unless_zero(&ns
->kref
))
280 if (!try_module_get(ns
->ctrl
->ops
->module
))
283 spin_unlock(&dev_list_lock
);
288 kref_put(&ns
->kref
, nvme_free_ns
);
290 spin_unlock(&dev_list_lock
);
294 struct request
*nvme_alloc_request(struct request_queue
*q
,
295 struct nvme_command
*cmd
, unsigned int flags
, int qid
)
297 unsigned op
= nvme_is_write(cmd
) ? REQ_OP_DRV_OUT
: REQ_OP_DRV_IN
;
300 if (qid
== NVME_QID_ANY
) {
301 req
= blk_mq_alloc_request(q
, op
, flags
);
303 req
= blk_mq_alloc_request_hctx(q
, op
, flags
,
309 req
->cmd_flags
|= REQ_FAILFAST_DRIVER
;
310 nvme_req(req
)->cmd
= cmd
;
314 EXPORT_SYMBOL_GPL(nvme_alloc_request
);
316 static int nvme_toggle_streams(struct nvme_ctrl
*ctrl
, bool enable
)
318 struct nvme_command c
;
320 memset(&c
, 0, sizeof(c
));
322 c
.directive
.opcode
= nvme_admin_directive_send
;
323 c
.directive
.nsid
= cpu_to_le32(NVME_NSID_ALL
);
324 c
.directive
.doper
= NVME_DIR_SND_ID_OP_ENABLE
;
325 c
.directive
.dtype
= NVME_DIR_IDENTIFY
;
326 c
.directive
.tdtype
= NVME_DIR_STREAMS
;
327 c
.directive
.endir
= enable
? NVME_DIR_ENDIR
: 0;
329 return nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, NULL
, 0);
332 static int nvme_disable_streams(struct nvme_ctrl
*ctrl
)
334 return nvme_toggle_streams(ctrl
, false);
337 static int nvme_enable_streams(struct nvme_ctrl
*ctrl
)
339 return nvme_toggle_streams(ctrl
, true);
342 static int nvme_get_stream_params(struct nvme_ctrl
*ctrl
,
343 struct streams_directive_params
*s
, u32 nsid
)
345 struct nvme_command c
;
347 memset(&c
, 0, sizeof(c
));
348 memset(s
, 0, sizeof(*s
));
350 c
.directive
.opcode
= nvme_admin_directive_recv
;
351 c
.directive
.nsid
= cpu_to_le32(nsid
);
352 c
.directive
.numd
= cpu_to_le32((sizeof(*s
) >> 2) - 1);
353 c
.directive
.doper
= NVME_DIR_RCV_ST_OP_PARAM
;
354 c
.directive
.dtype
= NVME_DIR_STREAMS
;
356 return nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, s
, sizeof(*s
));
359 static int nvme_configure_directives(struct nvme_ctrl
*ctrl
)
361 struct streams_directive_params s
;
364 if (!(ctrl
->oacs
& NVME_CTRL_OACS_DIRECTIVES
))
369 ret
= nvme_enable_streams(ctrl
);
373 ret
= nvme_get_stream_params(ctrl
, &s
, NVME_NSID_ALL
);
377 ctrl
->nssa
= le16_to_cpu(s
.nssa
);
378 if (ctrl
->nssa
< BLK_MAX_WRITE_HINTS
- 1) {
379 dev_info(ctrl
->device
, "too few streams (%u) available\n",
381 nvme_disable_streams(ctrl
);
385 ctrl
->nr_streams
= min_t(unsigned, ctrl
->nssa
, BLK_MAX_WRITE_HINTS
- 1);
386 dev_info(ctrl
->device
, "Using %u streams\n", ctrl
->nr_streams
);
391 * Check if 'req' has a write hint associated with it. If it does, assign
392 * a valid namespace stream to the write.
394 static void nvme_assign_write_stream(struct nvme_ctrl
*ctrl
,
395 struct request
*req
, u16
*control
,
398 enum rw_hint streamid
= req
->write_hint
;
400 if (streamid
== WRITE_LIFE_NOT_SET
|| streamid
== WRITE_LIFE_NONE
)
404 if (WARN_ON_ONCE(streamid
> ctrl
->nr_streams
))
407 *control
|= NVME_RW_DTYPE_STREAMS
;
408 *dsmgmt
|= streamid
<< 16;
411 if (streamid
< ARRAY_SIZE(req
->q
->write_hints
))
412 req
->q
->write_hints
[streamid
] += blk_rq_bytes(req
) >> 9;
415 static inline void nvme_setup_flush(struct nvme_ns
*ns
,
416 struct nvme_command
*cmnd
)
418 memset(cmnd
, 0, sizeof(*cmnd
));
419 cmnd
->common
.opcode
= nvme_cmd_flush
;
420 cmnd
->common
.nsid
= cpu_to_le32(ns
->ns_id
);
423 static blk_status_t
nvme_setup_discard(struct nvme_ns
*ns
, struct request
*req
,
424 struct nvme_command
*cmnd
)
426 unsigned short segments
= blk_rq_nr_discard_segments(req
), n
= 0;
427 struct nvme_dsm_range
*range
;
430 range
= kmalloc_array(segments
, sizeof(*range
), GFP_ATOMIC
);
432 return BLK_STS_RESOURCE
;
434 __rq_for_each_bio(bio
, req
) {
435 u64 slba
= nvme_block_nr(ns
, bio
->bi_iter
.bi_sector
);
436 u32 nlb
= bio
->bi_iter
.bi_size
>> ns
->lba_shift
;
438 range
[n
].cattr
= cpu_to_le32(0);
439 range
[n
].nlb
= cpu_to_le32(nlb
);
440 range
[n
].slba
= cpu_to_le64(slba
);
444 if (WARN_ON_ONCE(n
!= segments
)) {
446 return BLK_STS_IOERR
;
449 memset(cmnd
, 0, sizeof(*cmnd
));
450 cmnd
->dsm
.opcode
= nvme_cmd_dsm
;
451 cmnd
->dsm
.nsid
= cpu_to_le32(ns
->ns_id
);
452 cmnd
->dsm
.nr
= cpu_to_le32(segments
- 1);
453 cmnd
->dsm
.attributes
= cpu_to_le32(NVME_DSMGMT_AD
);
455 req
->special_vec
.bv_page
= virt_to_page(range
);
456 req
->special_vec
.bv_offset
= offset_in_page(range
);
457 req
->special_vec
.bv_len
= sizeof(*range
) * segments
;
458 req
->rq_flags
|= RQF_SPECIAL_PAYLOAD
;
463 static inline blk_status_t
nvme_setup_rw(struct nvme_ns
*ns
,
464 struct request
*req
, struct nvme_command
*cmnd
)
466 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
471 * If formated with metadata, require the block layer provide a buffer
472 * unless this namespace is formated such that the metadata can be
473 * stripped/generated by the controller with PRACT=1.
476 (!ns
->pi_type
|| ns
->ms
!= sizeof(struct t10_pi_tuple
)) &&
477 !blk_integrity_rq(req
) && !blk_rq_is_passthrough(req
))
478 return BLK_STS_NOTSUPP
;
480 if (req
->cmd_flags
& REQ_FUA
)
481 control
|= NVME_RW_FUA
;
482 if (req
->cmd_flags
& (REQ_FAILFAST_DEV
| REQ_RAHEAD
))
483 control
|= NVME_RW_LR
;
485 if (req
->cmd_flags
& REQ_RAHEAD
)
486 dsmgmt
|= NVME_RW_DSM_FREQ_PREFETCH
;
488 memset(cmnd
, 0, sizeof(*cmnd
));
489 cmnd
->rw
.opcode
= (rq_data_dir(req
) ? nvme_cmd_write
: nvme_cmd_read
);
490 cmnd
->rw
.nsid
= cpu_to_le32(ns
->ns_id
);
491 cmnd
->rw
.slba
= cpu_to_le64(nvme_block_nr(ns
, blk_rq_pos(req
)));
492 cmnd
->rw
.length
= cpu_to_le16((blk_rq_bytes(req
) >> ns
->lba_shift
) - 1);
494 if (req_op(req
) == REQ_OP_WRITE
&& ctrl
->nr_streams
)
495 nvme_assign_write_stream(ctrl
, req
, &control
, &dsmgmt
);
498 switch (ns
->pi_type
) {
499 case NVME_NS_DPS_PI_TYPE3
:
500 control
|= NVME_RW_PRINFO_PRCHK_GUARD
;
502 case NVME_NS_DPS_PI_TYPE1
:
503 case NVME_NS_DPS_PI_TYPE2
:
504 control
|= NVME_RW_PRINFO_PRCHK_GUARD
|
505 NVME_RW_PRINFO_PRCHK_REF
;
506 cmnd
->rw
.reftag
= cpu_to_le32(
507 nvme_block_nr(ns
, blk_rq_pos(req
)));
510 if (!blk_integrity_rq(req
))
511 control
|= NVME_RW_PRINFO_PRACT
;
514 cmnd
->rw
.control
= cpu_to_le16(control
);
515 cmnd
->rw
.dsmgmt
= cpu_to_le32(dsmgmt
);
519 blk_status_t
nvme_setup_cmd(struct nvme_ns
*ns
, struct request
*req
,
520 struct nvme_command
*cmd
)
522 blk_status_t ret
= BLK_STS_OK
;
524 if (!(req
->rq_flags
& RQF_DONTPREP
)) {
525 nvme_req(req
)->retries
= 0;
526 nvme_req(req
)->flags
= 0;
527 req
->rq_flags
|= RQF_DONTPREP
;
530 switch (req_op(req
)) {
533 memcpy(cmd
, nvme_req(req
)->cmd
, sizeof(*cmd
));
536 nvme_setup_flush(ns
, cmd
);
538 case REQ_OP_WRITE_ZEROES
:
539 /* currently only aliased to deallocate for a few ctrls: */
541 ret
= nvme_setup_discard(ns
, req
, cmd
);
545 ret
= nvme_setup_rw(ns
, req
, cmd
);
549 return BLK_STS_IOERR
;
552 cmd
->common
.command_id
= req
->tag
;
555 EXPORT_SYMBOL_GPL(nvme_setup_cmd
);
558 * Returns 0 on success. If the result is negative, it's a Linux error code;
559 * if the result is positive, it's an NVM Express status code
561 int __nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
562 union nvme_result
*result
, void *buffer
, unsigned bufflen
,
563 unsigned timeout
, int qid
, int at_head
, int flags
)
568 req
= nvme_alloc_request(q
, cmd
, flags
, qid
);
572 req
->timeout
= timeout
? timeout
: ADMIN_TIMEOUT
;
574 if (buffer
&& bufflen
) {
575 ret
= blk_rq_map_kern(q
, req
, buffer
, bufflen
, GFP_KERNEL
);
580 blk_execute_rq(req
->q
, NULL
, req
, at_head
);
582 *result
= nvme_req(req
)->result
;
583 if (nvme_req(req
)->flags
& NVME_REQ_CANCELLED
)
586 ret
= nvme_req(req
)->status
;
588 blk_mq_free_request(req
);
591 EXPORT_SYMBOL_GPL(__nvme_submit_sync_cmd
);
593 int nvme_submit_sync_cmd(struct request_queue
*q
, struct nvme_command
*cmd
,
594 void *buffer
, unsigned bufflen
)
596 return __nvme_submit_sync_cmd(q
, cmd
, NULL
, buffer
, bufflen
, 0,
599 EXPORT_SYMBOL_GPL(nvme_submit_sync_cmd
);
601 static void *nvme_add_user_metadata(struct bio
*bio
, void __user
*ubuf
,
602 unsigned len
, u32 seed
, bool write
)
604 struct bio_integrity_payload
*bip
;
608 buf
= kmalloc(len
, GFP_KERNEL
);
613 if (write
&& copy_from_user(buf
, ubuf
, len
))
616 bip
= bio_integrity_alloc(bio
, GFP_KERNEL
, 1);
622 bip
->bip_iter
.bi_size
= len
;
623 bip
->bip_iter
.bi_sector
= seed
;
624 ret
= bio_integrity_add_page(bio
, virt_to_page(buf
), len
,
625 offset_in_page(buf
));
635 static int nvme_submit_user_cmd(struct request_queue
*q
,
636 struct nvme_command
*cmd
, void __user
*ubuffer
,
637 unsigned bufflen
, void __user
*meta_buffer
, unsigned meta_len
,
638 u32 meta_seed
, u32
*result
, unsigned timeout
)
640 bool write
= nvme_is_write(cmd
);
641 struct nvme_ns
*ns
= q
->queuedata
;
642 struct gendisk
*disk
= ns
? ns
->disk
: NULL
;
644 struct bio
*bio
= NULL
;
648 req
= nvme_alloc_request(q
, cmd
, 0, NVME_QID_ANY
);
652 req
->timeout
= timeout
? timeout
: ADMIN_TIMEOUT
;
654 if (ubuffer
&& bufflen
) {
655 ret
= blk_rq_map_user(q
, req
, NULL
, ubuffer
, bufflen
,
661 if (disk
&& meta_buffer
&& meta_len
) {
662 meta
= nvme_add_user_metadata(bio
, meta_buffer
, meta_len
,
671 blk_execute_rq(req
->q
, disk
, req
, 0);
672 if (nvme_req(req
)->flags
& NVME_REQ_CANCELLED
)
675 ret
= nvme_req(req
)->status
;
677 *result
= le32_to_cpu(nvme_req(req
)->result
.u32
);
678 if (meta
&& !ret
&& !write
) {
679 if (copy_to_user(meta_buffer
, meta
, meta_len
))
685 blk_rq_unmap_user(bio
);
687 blk_mq_free_request(req
);
691 static void nvme_keep_alive_end_io(struct request
*rq
, blk_status_t status
)
693 struct nvme_ctrl
*ctrl
= rq
->end_io_data
;
695 blk_mq_free_request(rq
);
698 dev_err(ctrl
->device
,
699 "failed nvme_keep_alive_end_io error=%d\n",
704 schedule_delayed_work(&ctrl
->ka_work
, ctrl
->kato
* HZ
);
707 static int nvme_keep_alive(struct nvme_ctrl
*ctrl
)
709 struct nvme_command c
;
712 memset(&c
, 0, sizeof(c
));
713 c
.common
.opcode
= nvme_admin_keep_alive
;
715 rq
= nvme_alloc_request(ctrl
->admin_q
, &c
, BLK_MQ_REQ_RESERVED
,
720 rq
->timeout
= ctrl
->kato
* HZ
;
721 rq
->end_io_data
= ctrl
;
723 blk_execute_rq_nowait(rq
->q
, NULL
, rq
, 0, nvme_keep_alive_end_io
);
728 static void nvme_keep_alive_work(struct work_struct
*work
)
730 struct nvme_ctrl
*ctrl
= container_of(to_delayed_work(work
),
731 struct nvme_ctrl
, ka_work
);
733 if (nvme_keep_alive(ctrl
)) {
734 /* allocation failure, reset the controller */
735 dev_err(ctrl
->device
, "keep-alive failed\n");
736 nvme_reset_ctrl(ctrl
);
741 void nvme_start_keep_alive(struct nvme_ctrl
*ctrl
)
743 if (unlikely(ctrl
->kato
== 0))
746 INIT_DELAYED_WORK(&ctrl
->ka_work
, nvme_keep_alive_work
);
747 schedule_delayed_work(&ctrl
->ka_work
, ctrl
->kato
* HZ
);
749 EXPORT_SYMBOL_GPL(nvme_start_keep_alive
);
751 void nvme_stop_keep_alive(struct nvme_ctrl
*ctrl
)
753 if (unlikely(ctrl
->kato
== 0))
756 cancel_delayed_work_sync(&ctrl
->ka_work
);
758 EXPORT_SYMBOL_GPL(nvme_stop_keep_alive
);
760 static int nvme_identify_ctrl(struct nvme_ctrl
*dev
, struct nvme_id_ctrl
**id
)
762 struct nvme_command c
= { };
765 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
766 c
.identify
.opcode
= nvme_admin_identify
;
767 c
.identify
.cns
= NVME_ID_CNS_CTRL
;
769 *id
= kmalloc(sizeof(struct nvme_id_ctrl
), GFP_KERNEL
);
773 error
= nvme_submit_sync_cmd(dev
->admin_q
, &c
, *id
,
774 sizeof(struct nvme_id_ctrl
));
780 static int nvme_identify_ns_descs(struct nvme_ctrl
*ctrl
, unsigned nsid
,
781 u8
*eui64
, u8
*nguid
, uuid_t
*uuid
)
783 struct nvme_command c
= { };
789 c
.identify
.opcode
= nvme_admin_identify
;
790 c
.identify
.nsid
= cpu_to_le32(nsid
);
791 c
.identify
.cns
= NVME_ID_CNS_NS_DESC_LIST
;
793 data
= kzalloc(NVME_IDENTIFY_DATA_SIZE
, GFP_KERNEL
);
797 status
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, data
,
798 NVME_IDENTIFY_DATA_SIZE
);
802 for (pos
= 0; pos
< NVME_IDENTIFY_DATA_SIZE
; pos
+= len
) {
803 struct nvme_ns_id_desc
*cur
= data
+ pos
;
809 case NVME_NIDT_EUI64
:
810 if (cur
->nidl
!= NVME_NIDT_EUI64_LEN
) {
811 dev_warn(ctrl
->device
,
812 "ctrl returned bogus length: %d for NVME_NIDT_EUI64\n",
816 len
= NVME_NIDT_EUI64_LEN
;
817 memcpy(eui64
, data
+ pos
+ sizeof(*cur
), len
);
819 case NVME_NIDT_NGUID
:
820 if (cur
->nidl
!= NVME_NIDT_NGUID_LEN
) {
821 dev_warn(ctrl
->device
,
822 "ctrl returned bogus length: %d for NVME_NIDT_NGUID\n",
826 len
= NVME_NIDT_NGUID_LEN
;
827 memcpy(nguid
, data
+ pos
+ sizeof(*cur
), len
);
830 if (cur
->nidl
!= NVME_NIDT_UUID_LEN
) {
831 dev_warn(ctrl
->device
,
832 "ctrl returned bogus length: %d for NVME_NIDT_UUID\n",
836 len
= NVME_NIDT_UUID_LEN
;
837 uuid_copy(uuid
, data
+ pos
+ sizeof(*cur
));
840 /* Skip unnkown types */
852 static int nvme_identify_ns_list(struct nvme_ctrl
*dev
, unsigned nsid
, __le32
*ns_list
)
854 struct nvme_command c
= { };
856 c
.identify
.opcode
= nvme_admin_identify
;
857 c
.identify
.cns
= NVME_ID_CNS_NS_ACTIVE_LIST
;
858 c
.identify
.nsid
= cpu_to_le32(nsid
);
859 return nvme_submit_sync_cmd(dev
->admin_q
, &c
, ns_list
, 0x1000);
862 static struct nvme_id_ns
*nvme_identify_ns(struct nvme_ctrl
*ctrl
,
865 struct nvme_id_ns
*id
;
866 struct nvme_command c
= { };
869 /* gcc-4.4.4 (at least) has issues with initializers and anon unions */
870 c
.identify
.opcode
= nvme_admin_identify
;
871 c
.identify
.nsid
= cpu_to_le32(nsid
);
872 c
.identify
.cns
= NVME_ID_CNS_NS
;
874 id
= kmalloc(sizeof(*id
), GFP_KERNEL
);
878 error
= nvme_submit_sync_cmd(ctrl
->admin_q
, &c
, id
, sizeof(*id
));
880 dev_warn(ctrl
->device
, "Identify namespace failed\n");
888 static int nvme_set_features(struct nvme_ctrl
*dev
, unsigned fid
, unsigned dword11
,
889 void *buffer
, size_t buflen
, u32
*result
)
891 struct nvme_command c
;
892 union nvme_result res
;
895 memset(&c
, 0, sizeof(c
));
896 c
.features
.opcode
= nvme_admin_set_features
;
897 c
.features
.fid
= cpu_to_le32(fid
);
898 c
.features
.dword11
= cpu_to_le32(dword11
);
900 ret
= __nvme_submit_sync_cmd(dev
->admin_q
, &c
, &res
,
901 buffer
, buflen
, 0, NVME_QID_ANY
, 0, 0);
902 if (ret
>= 0 && result
)
903 *result
= le32_to_cpu(res
.u32
);
907 int nvme_set_queue_count(struct nvme_ctrl
*ctrl
, int *count
)
909 u32 q_count
= (*count
- 1) | ((*count
- 1) << 16);
911 int status
, nr_io_queues
;
913 status
= nvme_set_features(ctrl
, NVME_FEAT_NUM_QUEUES
, q_count
, NULL
, 0,
919 * Degraded controllers might return an error when setting the queue
920 * count. We still want to be able to bring them online and offer
921 * access to the admin queue, as that might be only way to fix them up.
924 dev_err(ctrl
->device
, "Could not set queue count (%d)\n", status
);
927 nr_io_queues
= min(result
& 0xffff, result
>> 16) + 1;
928 *count
= min(*count
, nr_io_queues
);
933 EXPORT_SYMBOL_GPL(nvme_set_queue_count
);
935 static int nvme_submit_io(struct nvme_ns
*ns
, struct nvme_user_io __user
*uio
)
937 struct nvme_user_io io
;
938 struct nvme_command c
;
939 unsigned length
, meta_len
;
940 void __user
*metadata
;
942 if (copy_from_user(&io
, uio
, sizeof(io
)))
950 case nvme_cmd_compare
:
956 length
= (io
.nblocks
+ 1) << ns
->lba_shift
;
957 meta_len
= (io
.nblocks
+ 1) * ns
->ms
;
958 metadata
= (void __user
*)(uintptr_t)io
.metadata
;
963 } else if (meta_len
) {
964 if ((io
.metadata
& 3) || !io
.metadata
)
968 memset(&c
, 0, sizeof(c
));
969 c
.rw
.opcode
= io
.opcode
;
970 c
.rw
.flags
= io
.flags
;
971 c
.rw
.nsid
= cpu_to_le32(ns
->ns_id
);
972 c
.rw
.slba
= cpu_to_le64(io
.slba
);
973 c
.rw
.length
= cpu_to_le16(io
.nblocks
);
974 c
.rw
.control
= cpu_to_le16(io
.control
);
975 c
.rw
.dsmgmt
= cpu_to_le32(io
.dsmgmt
);
976 c
.rw
.reftag
= cpu_to_le32(io
.reftag
);
977 c
.rw
.apptag
= cpu_to_le16(io
.apptag
);
978 c
.rw
.appmask
= cpu_to_le16(io
.appmask
);
980 return nvme_submit_user_cmd(ns
->queue
, &c
,
981 (void __user
*)(uintptr_t)io
.addr
, length
,
982 metadata
, meta_len
, io
.slba
, NULL
, 0);
985 static int nvme_user_cmd(struct nvme_ctrl
*ctrl
, struct nvme_ns
*ns
,
986 struct nvme_passthru_cmd __user
*ucmd
)
988 struct nvme_passthru_cmd cmd
;
989 struct nvme_command c
;
990 unsigned timeout
= 0;
993 if (!capable(CAP_SYS_ADMIN
))
995 if (copy_from_user(&cmd
, ucmd
, sizeof(cmd
)))
1000 memset(&c
, 0, sizeof(c
));
1001 c
.common
.opcode
= cmd
.opcode
;
1002 c
.common
.flags
= cmd
.flags
;
1003 c
.common
.nsid
= cpu_to_le32(cmd
.nsid
);
1004 c
.common
.cdw2
[0] = cpu_to_le32(cmd
.cdw2
);
1005 c
.common
.cdw2
[1] = cpu_to_le32(cmd
.cdw3
);
1006 c
.common
.cdw10
[0] = cpu_to_le32(cmd
.cdw10
);
1007 c
.common
.cdw10
[1] = cpu_to_le32(cmd
.cdw11
);
1008 c
.common
.cdw10
[2] = cpu_to_le32(cmd
.cdw12
);
1009 c
.common
.cdw10
[3] = cpu_to_le32(cmd
.cdw13
);
1010 c
.common
.cdw10
[4] = cpu_to_le32(cmd
.cdw14
);
1011 c
.common
.cdw10
[5] = cpu_to_le32(cmd
.cdw15
);
1014 timeout
= msecs_to_jiffies(cmd
.timeout_ms
);
1016 status
= nvme_submit_user_cmd(ns
? ns
->queue
: ctrl
->admin_q
, &c
,
1017 (void __user
*)(uintptr_t)cmd
.addr
, cmd
.data_len
,
1018 (void __user
*)(uintptr_t)cmd
.metadata
, cmd
.metadata
,
1019 0, &cmd
.result
, timeout
);
1021 if (put_user(cmd
.result
, &ucmd
->result
))
1028 static int nvme_ioctl(struct block_device
*bdev
, fmode_t mode
,
1029 unsigned int cmd
, unsigned long arg
)
1031 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
1035 force_successful_syscall_return();
1037 case NVME_IOCTL_ADMIN_CMD
:
1038 return nvme_user_cmd(ns
->ctrl
, NULL
, (void __user
*)arg
);
1039 case NVME_IOCTL_IO_CMD
:
1040 return nvme_user_cmd(ns
->ctrl
, ns
, (void __user
*)arg
);
1041 case NVME_IOCTL_SUBMIT_IO
:
1042 return nvme_submit_io(ns
, (void __user
*)arg
);
1046 return nvme_nvm_ioctl(ns
, cmd
, arg
);
1048 if (is_sed_ioctl(cmd
))
1049 return sed_ioctl(ns
->ctrl
->opal_dev
, cmd
,
1050 (void __user
*) arg
);
1055 #ifdef CONFIG_COMPAT
1056 static int nvme_compat_ioctl(struct block_device
*bdev
, fmode_t mode
,
1057 unsigned int cmd
, unsigned long arg
)
1059 return nvme_ioctl(bdev
, mode
, cmd
, arg
);
1062 #define nvme_compat_ioctl NULL
1065 static int nvme_open(struct block_device
*bdev
, fmode_t mode
)
1067 return nvme_get_ns_from_disk(bdev
->bd_disk
) ? 0 : -ENXIO
;
1070 static void nvme_release(struct gendisk
*disk
, fmode_t mode
)
1072 struct nvme_ns
*ns
= disk
->private_data
;
1074 module_put(ns
->ctrl
->ops
->module
);
1078 static int nvme_getgeo(struct block_device
*bdev
, struct hd_geometry
*geo
)
1080 /* some standard values */
1081 geo
->heads
= 1 << 6;
1082 geo
->sectors
= 1 << 5;
1083 geo
->cylinders
= get_capacity(bdev
->bd_disk
) >> 11;
1087 #ifdef CONFIG_BLK_DEV_INTEGRITY
1088 static void nvme_prep_integrity(struct gendisk
*disk
, struct nvme_id_ns
*id
,
1091 struct nvme_ns
*ns
= disk
->private_data
;
1092 u16 old_ms
= ns
->ms
;
1095 ns
->ms
= le16_to_cpu(id
->lbaf
[id
->flbas
& NVME_NS_FLBAS_LBA_MASK
].ms
);
1096 ns
->ext
= ns
->ms
&& (id
->flbas
& NVME_NS_FLBAS_META_EXT
);
1098 /* PI implementation requires metadata equal t10 pi tuple size */
1099 if (ns
->ms
== sizeof(struct t10_pi_tuple
))
1100 pi_type
= id
->dps
& NVME_NS_DPS_PI_MASK
;
1102 if (blk_get_integrity(disk
) &&
1103 (ns
->pi_type
!= pi_type
|| ns
->ms
!= old_ms
||
1104 bs
!= queue_logical_block_size(disk
->queue
) ||
1105 (ns
->ms
&& ns
->ext
)))
1106 blk_integrity_unregister(disk
);
1108 ns
->pi_type
= pi_type
;
1111 static void nvme_init_integrity(struct nvme_ns
*ns
)
1113 struct blk_integrity integrity
;
1115 memset(&integrity
, 0, sizeof(integrity
));
1116 switch (ns
->pi_type
) {
1117 case NVME_NS_DPS_PI_TYPE3
:
1118 integrity
.profile
= &t10_pi_type3_crc
;
1119 integrity
.tag_size
= sizeof(u16
) + sizeof(u32
);
1120 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1122 case NVME_NS_DPS_PI_TYPE1
:
1123 case NVME_NS_DPS_PI_TYPE2
:
1124 integrity
.profile
= &t10_pi_type1_crc
;
1125 integrity
.tag_size
= sizeof(u16
);
1126 integrity
.flags
|= BLK_INTEGRITY_DEVICE_CAPABLE
;
1129 integrity
.profile
= NULL
;
1132 integrity
.tuple_size
= ns
->ms
;
1133 blk_integrity_register(ns
->disk
, &integrity
);
1134 blk_queue_max_integrity_segments(ns
->queue
, 1);
1137 static void nvme_prep_integrity(struct gendisk
*disk
, struct nvme_id_ns
*id
,
1141 static void nvme_init_integrity(struct nvme_ns
*ns
)
1144 #endif /* CONFIG_BLK_DEV_INTEGRITY */
1146 static void nvme_set_chunk_size(struct nvme_ns
*ns
)
1148 u32 chunk_size
= (((u32
)ns
->noiob
) << (ns
->lba_shift
- 9));
1149 blk_queue_chunk_sectors(ns
->queue
, rounddown_pow_of_two(chunk_size
));
1152 static void nvme_config_discard(struct nvme_ns
*ns
)
1154 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1155 u32 logical_block_size
= queue_logical_block_size(ns
->queue
);
1157 BUILD_BUG_ON(PAGE_SIZE
/ sizeof(struct nvme_dsm_range
) <
1158 NVME_DSM_MAX_RANGES
);
1160 if (ctrl
->nr_streams
&& ns
->sws
&& ns
->sgs
) {
1161 unsigned int sz
= logical_block_size
* ns
->sws
* ns
->sgs
;
1163 ns
->queue
->limits
.discard_alignment
= sz
;
1164 ns
->queue
->limits
.discard_granularity
= sz
;
1166 ns
->queue
->limits
.discard_alignment
= logical_block_size
;
1167 ns
->queue
->limits
.discard_granularity
= logical_block_size
;
1169 blk_queue_max_discard_sectors(ns
->queue
, UINT_MAX
);
1170 blk_queue_max_discard_segments(ns
->queue
, NVME_DSM_MAX_RANGES
);
1171 queue_flag_set_unlocked(QUEUE_FLAG_DISCARD
, ns
->queue
);
1173 if (ctrl
->quirks
& NVME_QUIRK_DEALLOCATE_ZEROES
)
1174 blk_queue_max_write_zeroes_sectors(ns
->queue
, UINT_MAX
);
1177 static void nvme_report_ns_ids(struct nvme_ctrl
*ctrl
, unsigned int nsid
,
1178 struct nvme_id_ns
*id
, u8
*eui64
, u8
*nguid
, uuid_t
*uuid
)
1180 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
1181 memcpy(eui64
, id
->eui64
, sizeof(id
->eui64
));
1182 if (ctrl
->vs
>= NVME_VS(1, 2, 0))
1183 memcpy(nguid
, id
->nguid
, sizeof(id
->nguid
));
1184 if (ctrl
->vs
>= NVME_VS(1, 3, 0)) {
1185 /* Don't treat error as fatal we potentially
1186 * already have a NGUID or EUI-64
1188 if (nvme_identify_ns_descs(ctrl
, nsid
, eui64
, nguid
, uuid
))
1189 dev_warn(ctrl
->device
,
1190 "%s: Identify Descriptors failed\n", __func__
);
1194 static void __nvme_revalidate_disk(struct gendisk
*disk
, struct nvme_id_ns
*id
)
1196 struct nvme_ns
*ns
= disk
->private_data
;
1197 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1201 * If identify namespace failed, use default 512 byte block size so
1202 * block layer can use before failing read/write for 0 capacity.
1204 ns
->lba_shift
= id
->lbaf
[id
->flbas
& NVME_NS_FLBAS_LBA_MASK
].ds
;
1205 if (ns
->lba_shift
== 0)
1207 bs
= 1 << ns
->lba_shift
;
1208 ns
->noiob
= le16_to_cpu(id
->noiob
);
1210 blk_mq_freeze_queue(disk
->queue
);
1212 if (ctrl
->ops
->flags
& NVME_F_METADATA_SUPPORTED
)
1213 nvme_prep_integrity(disk
, id
, bs
);
1214 blk_queue_logical_block_size(ns
->queue
, bs
);
1216 nvme_set_chunk_size(ns
);
1217 if (ns
->ms
&& !blk_get_integrity(disk
) && !ns
->ext
)
1218 nvme_init_integrity(ns
);
1219 if (ns
->ms
&& !(ns
->ms
== 8 && ns
->pi_type
) && !blk_get_integrity(disk
))
1220 set_capacity(disk
, 0);
1222 set_capacity(disk
, le64_to_cpup(&id
->nsze
) << (ns
->lba_shift
- 9));
1224 if (ctrl
->oncs
& NVME_CTRL_ONCS_DSM
)
1225 nvme_config_discard(ns
);
1226 blk_mq_unfreeze_queue(disk
->queue
);
1229 static int nvme_revalidate_disk(struct gendisk
*disk
)
1231 struct nvme_ns
*ns
= disk
->private_data
;
1232 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
1233 struct nvme_id_ns
*id
;
1234 u8 eui64
[8] = { 0 }, nguid
[16] = { 0 };
1235 uuid_t uuid
= uuid_null
;
1238 if (test_bit(NVME_NS_DEAD
, &ns
->flags
)) {
1239 set_capacity(disk
, 0);
1243 id
= nvme_identify_ns(ctrl
, ns
->ns_id
);
1247 if (id
->ncap
== 0) {
1252 __nvme_revalidate_disk(disk
, id
);
1253 nvme_report_ns_ids(ctrl
, ns
->ns_id
, id
, eui64
, nguid
, &uuid
);
1254 if (!uuid_equal(&ns
->uuid
, &uuid
) ||
1255 memcmp(&ns
->nguid
, &nguid
, sizeof(ns
->nguid
)) ||
1256 memcmp(&ns
->eui
, &eui64
, sizeof(ns
->eui
))) {
1257 dev_err(ctrl
->device
,
1258 "identifiers changed for nsid %d\n", ns
->ns_id
);
1267 static char nvme_pr_type(enum pr_type type
)
1270 case PR_WRITE_EXCLUSIVE
:
1272 case PR_EXCLUSIVE_ACCESS
:
1274 case PR_WRITE_EXCLUSIVE_REG_ONLY
:
1276 case PR_EXCLUSIVE_ACCESS_REG_ONLY
:
1278 case PR_WRITE_EXCLUSIVE_ALL_REGS
:
1280 case PR_EXCLUSIVE_ACCESS_ALL_REGS
:
1287 static int nvme_pr_command(struct block_device
*bdev
, u32 cdw10
,
1288 u64 key
, u64 sa_key
, u8 op
)
1290 struct nvme_ns
*ns
= bdev
->bd_disk
->private_data
;
1291 struct nvme_command c
;
1292 u8 data
[16] = { 0, };
1294 put_unaligned_le64(key
, &data
[0]);
1295 put_unaligned_le64(sa_key
, &data
[8]);
1297 memset(&c
, 0, sizeof(c
));
1298 c
.common
.opcode
= op
;
1299 c
.common
.nsid
= cpu_to_le32(ns
->ns_id
);
1300 c
.common
.cdw10
[0] = cpu_to_le32(cdw10
);
1302 return nvme_submit_sync_cmd(ns
->queue
, &c
, data
, 16);
1305 static int nvme_pr_register(struct block_device
*bdev
, u64 old
,
1306 u64
new, unsigned flags
)
1310 if (flags
& ~PR_FL_IGNORE_KEY
)
1313 cdw10
= old
? 2 : 0;
1314 cdw10
|= (flags
& PR_FL_IGNORE_KEY
) ? 1 << 3 : 0;
1315 cdw10
|= (1 << 30) | (1 << 31); /* PTPL=1 */
1316 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_register
);
1319 static int nvme_pr_reserve(struct block_device
*bdev
, u64 key
,
1320 enum pr_type type
, unsigned flags
)
1324 if (flags
& ~PR_FL_IGNORE_KEY
)
1327 cdw10
= nvme_pr_type(type
) << 8;
1328 cdw10
|= ((flags
& PR_FL_IGNORE_KEY
) ? 1 << 3 : 0);
1329 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_acquire
);
1332 static int nvme_pr_preempt(struct block_device
*bdev
, u64 old
, u64
new,
1333 enum pr_type type
, bool abort
)
1335 u32 cdw10
= nvme_pr_type(type
) << 8 | abort
? 2 : 1;
1336 return nvme_pr_command(bdev
, cdw10
, old
, new, nvme_cmd_resv_acquire
);
1339 static int nvme_pr_clear(struct block_device
*bdev
, u64 key
)
1341 u32 cdw10
= 1 | (key
? 1 << 3 : 0);
1342 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_register
);
1345 static int nvme_pr_release(struct block_device
*bdev
, u64 key
, enum pr_type type
)
1347 u32 cdw10
= nvme_pr_type(type
) << 8 | key
? 1 << 3 : 0;
1348 return nvme_pr_command(bdev
, cdw10
, key
, 0, nvme_cmd_resv_release
);
1351 static const struct pr_ops nvme_pr_ops
= {
1352 .pr_register
= nvme_pr_register
,
1353 .pr_reserve
= nvme_pr_reserve
,
1354 .pr_release
= nvme_pr_release
,
1355 .pr_preempt
= nvme_pr_preempt
,
1356 .pr_clear
= nvme_pr_clear
,
1359 #ifdef CONFIG_BLK_SED_OPAL
1360 int nvme_sec_submit(void *data
, u16 spsp
, u8 secp
, void *buffer
, size_t len
,
1363 struct nvme_ctrl
*ctrl
= data
;
1364 struct nvme_command cmd
;
1366 memset(&cmd
, 0, sizeof(cmd
));
1368 cmd
.common
.opcode
= nvme_admin_security_send
;
1370 cmd
.common
.opcode
= nvme_admin_security_recv
;
1371 cmd
.common
.nsid
= 0;
1372 cmd
.common
.cdw10
[0] = cpu_to_le32(((u32
)secp
) << 24 | ((u32
)spsp
) << 8);
1373 cmd
.common
.cdw10
[1] = cpu_to_le32(len
);
1375 return __nvme_submit_sync_cmd(ctrl
->admin_q
, &cmd
, NULL
, buffer
, len
,
1376 ADMIN_TIMEOUT
, NVME_QID_ANY
, 1, 0);
1378 EXPORT_SYMBOL_GPL(nvme_sec_submit
);
1379 #endif /* CONFIG_BLK_SED_OPAL */
1381 static const struct block_device_operations nvme_fops
= {
1382 .owner
= THIS_MODULE
,
1383 .ioctl
= nvme_ioctl
,
1384 .compat_ioctl
= nvme_compat_ioctl
,
1386 .release
= nvme_release
,
1387 .getgeo
= nvme_getgeo
,
1388 .revalidate_disk
= nvme_revalidate_disk
,
1389 .pr_ops
= &nvme_pr_ops
,
1392 static int nvme_wait_ready(struct nvme_ctrl
*ctrl
, u64 cap
, bool enabled
)
1394 unsigned long timeout
=
1395 ((NVME_CAP_TIMEOUT(cap
) + 1) * HZ
/ 2) + jiffies
;
1396 u32 csts
, bit
= enabled
? NVME_CSTS_RDY
: 0;
1399 while ((ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
)) == 0) {
1402 if ((csts
& NVME_CSTS_RDY
) == bit
)
1406 if (fatal_signal_pending(current
))
1408 if (time_after(jiffies
, timeout
)) {
1409 dev_err(ctrl
->device
,
1410 "Device not ready; aborting %s\n", enabled
?
1411 "initialisation" : "reset");
1420 * If the device has been passed off to us in an enabled state, just clear
1421 * the enabled bit. The spec says we should set the 'shutdown notification
1422 * bits', but doing so may cause the device to complete commands to the
1423 * admin queue ... and we don't know what memory that might be pointing at!
1425 int nvme_disable_ctrl(struct nvme_ctrl
*ctrl
, u64 cap
)
1429 ctrl
->ctrl_config
&= ~NVME_CC_SHN_MASK
;
1430 ctrl
->ctrl_config
&= ~NVME_CC_ENABLE
;
1432 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
1436 if (ctrl
->quirks
& NVME_QUIRK_DELAY_BEFORE_CHK_RDY
)
1437 msleep(NVME_QUIRK_DELAY_AMOUNT
);
1439 return nvme_wait_ready(ctrl
, cap
, false);
1441 EXPORT_SYMBOL_GPL(nvme_disable_ctrl
);
1443 int nvme_enable_ctrl(struct nvme_ctrl
*ctrl
, u64 cap
)
1446 * Default to a 4K page size, with the intention to update this
1447 * path in the future to accomodate architectures with differing
1448 * kernel and IO page sizes.
1450 unsigned dev_page_min
= NVME_CAP_MPSMIN(cap
) + 12, page_shift
= 12;
1453 if (page_shift
< dev_page_min
) {
1454 dev_err(ctrl
->device
,
1455 "Minimum device page size %u too large for host (%u)\n",
1456 1 << dev_page_min
, 1 << page_shift
);
1460 ctrl
->page_size
= 1 << page_shift
;
1462 ctrl
->ctrl_config
= NVME_CC_CSS_NVM
;
1463 ctrl
->ctrl_config
|= (page_shift
- 12) << NVME_CC_MPS_SHIFT
;
1464 ctrl
->ctrl_config
|= NVME_CC_AMS_RR
| NVME_CC_SHN_NONE
;
1465 ctrl
->ctrl_config
|= NVME_CC_IOSQES
| NVME_CC_IOCQES
;
1466 ctrl
->ctrl_config
|= NVME_CC_ENABLE
;
1468 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
1471 return nvme_wait_ready(ctrl
, cap
, true);
1473 EXPORT_SYMBOL_GPL(nvme_enable_ctrl
);
1475 int nvme_shutdown_ctrl(struct nvme_ctrl
*ctrl
)
1477 unsigned long timeout
= jiffies
+ (ctrl
->shutdown_timeout
* HZ
);
1481 ctrl
->ctrl_config
&= ~NVME_CC_SHN_MASK
;
1482 ctrl
->ctrl_config
|= NVME_CC_SHN_NORMAL
;
1484 ret
= ctrl
->ops
->reg_write32(ctrl
, NVME_REG_CC
, ctrl
->ctrl_config
);
1488 while ((ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_CSTS
, &csts
)) == 0) {
1489 if ((csts
& NVME_CSTS_SHST_MASK
) == NVME_CSTS_SHST_CMPLT
)
1493 if (fatal_signal_pending(current
))
1495 if (time_after(jiffies
, timeout
)) {
1496 dev_err(ctrl
->device
,
1497 "Device shutdown incomplete; abort shutdown\n");
1504 EXPORT_SYMBOL_GPL(nvme_shutdown_ctrl
);
1506 static void nvme_set_queue_limits(struct nvme_ctrl
*ctrl
,
1507 struct request_queue
*q
)
1511 if (ctrl
->max_hw_sectors
) {
1513 (ctrl
->max_hw_sectors
/ (ctrl
->page_size
>> 9)) + 1;
1515 blk_queue_max_hw_sectors(q
, ctrl
->max_hw_sectors
);
1516 blk_queue_max_segments(q
, min_t(u32
, max_segments
, USHRT_MAX
));
1518 if (ctrl
->quirks
& NVME_QUIRK_STRIPE_SIZE
)
1519 blk_queue_chunk_sectors(q
, ctrl
->max_hw_sectors
);
1520 blk_queue_virt_boundary(q
, ctrl
->page_size
- 1);
1521 if (ctrl
->vwc
& NVME_CTRL_VWC_PRESENT
)
1523 blk_queue_write_cache(q
, vwc
, vwc
);
1526 static int nvme_configure_timestamp(struct nvme_ctrl
*ctrl
)
1531 if (!(ctrl
->oncs
& NVME_CTRL_ONCS_TIMESTAMP
))
1534 ts
= cpu_to_le64(ktime_to_ms(ktime_get_real()));
1535 ret
= nvme_set_features(ctrl
, NVME_FEAT_TIMESTAMP
, 0, &ts
, sizeof(ts
),
1538 dev_warn_once(ctrl
->device
,
1539 "could not set timestamp (%d)\n", ret
);
1543 static int nvme_configure_apst(struct nvme_ctrl
*ctrl
)
1546 * APST (Autonomous Power State Transition) lets us program a
1547 * table of power state transitions that the controller will
1548 * perform automatically. We configure it with a simple
1549 * heuristic: we are willing to spend at most 2% of the time
1550 * transitioning between power states. Therefore, when running
1551 * in any given state, we will enter the next lower-power
1552 * non-operational state after waiting 50 * (enlat + exlat)
1553 * microseconds, as long as that state's exit latency is under
1554 * the requested maximum latency.
1556 * We will not autonomously enter any non-operational state for
1557 * which the total latency exceeds ps_max_latency_us. Users
1558 * can set ps_max_latency_us to zero to turn off APST.
1562 struct nvme_feat_auto_pst
*table
;
1568 * If APST isn't supported or if we haven't been initialized yet,
1569 * then don't do anything.
1574 if (ctrl
->npss
> 31) {
1575 dev_warn(ctrl
->device
, "NPSS is invalid; not using APST\n");
1579 table
= kzalloc(sizeof(*table
), GFP_KERNEL
);
1583 if (!ctrl
->apst_enabled
|| ctrl
->ps_max_latency_us
== 0) {
1584 /* Turn off APST. */
1586 dev_dbg(ctrl
->device
, "APST disabled\n");
1588 __le64 target
= cpu_to_le64(0);
1592 * Walk through all states from lowest- to highest-power.
1593 * According to the spec, lower-numbered states use more
1594 * power. NPSS, despite the name, is the index of the
1595 * lowest-power state, not the number of states.
1597 for (state
= (int)ctrl
->npss
; state
>= 0; state
--) {
1598 u64 total_latency_us
, exit_latency_us
, transition_ms
;
1601 table
->entries
[state
] = target
;
1604 * Don't allow transitions to the deepest state
1605 * if it's quirked off.
1607 if (state
== ctrl
->npss
&&
1608 (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
))
1612 * Is this state a useful non-operational state for
1613 * higher-power states to autonomously transition to?
1615 if (!(ctrl
->psd
[state
].flags
&
1616 NVME_PS_FLAGS_NON_OP_STATE
))
1620 (u64
)le32_to_cpu(ctrl
->psd
[state
].exit_lat
);
1621 if (exit_latency_us
> ctrl
->ps_max_latency_us
)
1626 le32_to_cpu(ctrl
->psd
[state
].entry_lat
);
1629 * This state is good. Use it as the APST idle
1630 * target for higher power states.
1632 transition_ms
= total_latency_us
+ 19;
1633 do_div(transition_ms
, 20);
1634 if (transition_ms
> (1 << 24) - 1)
1635 transition_ms
= (1 << 24) - 1;
1637 target
= cpu_to_le64((state
<< 3) |
1638 (transition_ms
<< 8));
1643 if (total_latency_us
> max_lat_us
)
1644 max_lat_us
= total_latency_us
;
1650 dev_dbg(ctrl
->device
, "APST enabled but no non-operational states are available\n");
1652 dev_dbg(ctrl
->device
, "APST enabled: max PS = %d, max round-trip latency = %lluus, table = %*phN\n",
1653 max_ps
, max_lat_us
, (int)sizeof(*table
), table
);
1657 ret
= nvme_set_features(ctrl
, NVME_FEAT_AUTO_PST
, apste
,
1658 table
, sizeof(*table
), NULL
);
1660 dev_err(ctrl
->device
, "failed to set APST feature (%d)\n", ret
);
1666 static void nvme_set_latency_tolerance(struct device
*dev
, s32 val
)
1668 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
1672 case PM_QOS_LATENCY_TOLERANCE_NO_CONSTRAINT
:
1673 case PM_QOS_LATENCY_ANY
:
1681 if (ctrl
->ps_max_latency_us
!= latency
) {
1682 ctrl
->ps_max_latency_us
= latency
;
1683 nvme_configure_apst(ctrl
);
1687 struct nvme_core_quirk_entry
{
1689 * NVMe model and firmware strings are padded with spaces. For
1690 * simplicity, strings in the quirk table are padded with NULLs
1696 unsigned long quirks
;
1699 static const struct nvme_core_quirk_entry core_quirks
[] = {
1702 * This Toshiba device seems to die using any APST states. See:
1703 * https://bugs.launchpad.net/ubuntu/+source/linux/+bug/1678184/comments/11
1706 .mn
= "THNSF5256GPUK TOSHIBA",
1707 .quirks
= NVME_QUIRK_NO_APST
,
1711 /* match is null-terminated but idstr is space-padded. */
1712 static bool string_matches(const char *idstr
, const char *match
, size_t len
)
1719 matchlen
= strlen(match
);
1720 WARN_ON_ONCE(matchlen
> len
);
1722 if (memcmp(idstr
, match
, matchlen
))
1725 for (; matchlen
< len
; matchlen
++)
1726 if (idstr
[matchlen
] != ' ')
1732 static bool quirk_matches(const struct nvme_id_ctrl
*id
,
1733 const struct nvme_core_quirk_entry
*q
)
1735 return q
->vid
== le16_to_cpu(id
->vid
) &&
1736 string_matches(id
->mn
, q
->mn
, sizeof(id
->mn
)) &&
1737 string_matches(id
->fr
, q
->fr
, sizeof(id
->fr
));
1740 static void nvme_init_subnqn(struct nvme_ctrl
*ctrl
, struct nvme_id_ctrl
*id
)
1745 nqnlen
= strnlen(id
->subnqn
, NVMF_NQN_SIZE
);
1746 if (nqnlen
> 0 && nqnlen
< NVMF_NQN_SIZE
) {
1747 strcpy(ctrl
->subnqn
, id
->subnqn
);
1751 if (ctrl
->vs
>= NVME_VS(1, 2, 1))
1752 dev_warn(ctrl
->device
, "missing or invalid SUBNQN field.\n");
1754 /* Generate a "fake" NQN per Figure 254 in NVMe 1.3 + ECN 001 */
1755 off
= snprintf(ctrl
->subnqn
, NVMF_NQN_SIZE
,
1756 "nqn.2014.08.org.nvmexpress:%4x%4x",
1757 le16_to_cpu(id
->vid
), le16_to_cpu(id
->ssvid
));
1758 memcpy(ctrl
->subnqn
+ off
, id
->sn
, sizeof(id
->sn
));
1759 off
+= sizeof(id
->sn
);
1760 memcpy(ctrl
->subnqn
+ off
, id
->mn
, sizeof(id
->mn
));
1761 off
+= sizeof(id
->mn
);
1762 memset(ctrl
->subnqn
+ off
, 0, sizeof(ctrl
->subnqn
) - off
);
1766 * Initialize the cached copies of the Identify data and various controller
1767 * register in our nvme_ctrl structure. This should be called as soon as
1768 * the admin queue is fully up and running.
1770 int nvme_init_identify(struct nvme_ctrl
*ctrl
)
1772 struct nvme_id_ctrl
*id
;
1774 int ret
, page_shift
;
1776 bool prev_apst_enabled
;
1778 ret
= ctrl
->ops
->reg_read32(ctrl
, NVME_REG_VS
, &ctrl
->vs
);
1780 dev_err(ctrl
->device
, "Reading VS failed (%d)\n", ret
);
1784 ret
= ctrl
->ops
->reg_read64(ctrl
, NVME_REG_CAP
, &cap
);
1786 dev_err(ctrl
->device
, "Reading CAP failed (%d)\n", ret
);
1789 page_shift
= NVME_CAP_MPSMIN(cap
) + 12;
1791 if (ctrl
->vs
>= NVME_VS(1, 1, 0))
1792 ctrl
->subsystem
= NVME_CAP_NSSRC(cap
);
1794 ret
= nvme_identify_ctrl(ctrl
, &id
);
1796 dev_err(ctrl
->device
, "Identify Controller failed (%d)\n", ret
);
1800 nvme_init_subnqn(ctrl
, id
);
1802 if (!ctrl
->identified
) {
1804 * Check for quirks. Quirk can depend on firmware version,
1805 * so, in principle, the set of quirks present can change
1806 * across a reset. As a possible future enhancement, we
1807 * could re-scan for quirks every time we reinitialize
1808 * the device, but we'd have to make sure that the driver
1809 * behaves intelligently if the quirks change.
1814 for (i
= 0; i
< ARRAY_SIZE(core_quirks
); i
++) {
1815 if (quirk_matches(id
, &core_quirks
[i
]))
1816 ctrl
->quirks
|= core_quirks
[i
].quirks
;
1820 if (force_apst
&& (ctrl
->quirks
& NVME_QUIRK_NO_DEEPEST_PS
)) {
1821 dev_warn(ctrl
->device
, "forcibly allowing all power states due to nvme_core.force_apst -- use at your own risk\n");
1822 ctrl
->quirks
&= ~NVME_QUIRK_NO_DEEPEST_PS
;
1825 ctrl
->oacs
= le16_to_cpu(id
->oacs
);
1826 ctrl
->vid
= le16_to_cpu(id
->vid
);
1827 ctrl
->oncs
= le16_to_cpup(&id
->oncs
);
1828 atomic_set(&ctrl
->abort_limit
, id
->acl
+ 1);
1829 ctrl
->vwc
= id
->vwc
;
1830 ctrl
->cntlid
= le16_to_cpup(&id
->cntlid
);
1831 memcpy(ctrl
->serial
, id
->sn
, sizeof(id
->sn
));
1832 memcpy(ctrl
->model
, id
->mn
, sizeof(id
->mn
));
1833 memcpy(ctrl
->firmware_rev
, id
->fr
, sizeof(id
->fr
));
1835 max_hw_sectors
= 1 << (id
->mdts
+ page_shift
- 9);
1837 max_hw_sectors
= UINT_MAX
;
1838 ctrl
->max_hw_sectors
=
1839 min_not_zero(ctrl
->max_hw_sectors
, max_hw_sectors
);
1841 nvme_set_queue_limits(ctrl
, ctrl
->admin_q
);
1842 ctrl
->sgls
= le32_to_cpu(id
->sgls
);
1843 ctrl
->kas
= le16_to_cpu(id
->kas
);
1847 u32 transition_time
= le32_to_cpu(id
->rtd3e
) / 1000000;
1849 ctrl
->shutdown_timeout
= clamp_t(unsigned int, transition_time
,
1850 shutdown_timeout
, 60);
1852 if (ctrl
->shutdown_timeout
!= shutdown_timeout
)
1853 dev_warn(ctrl
->device
,
1854 "Shutdown timeout set to %u seconds\n",
1855 ctrl
->shutdown_timeout
);
1857 ctrl
->shutdown_timeout
= shutdown_timeout
;
1859 ctrl
->npss
= id
->npss
;
1860 ctrl
->apsta
= id
->apsta
;
1861 prev_apst_enabled
= ctrl
->apst_enabled
;
1862 if (ctrl
->quirks
& NVME_QUIRK_NO_APST
) {
1863 if (force_apst
&& id
->apsta
) {
1864 dev_warn(ctrl
->device
, "forcibly allowing APST due to nvme_core.force_apst -- use at your own risk\n");
1865 ctrl
->apst_enabled
= true;
1867 ctrl
->apst_enabled
= false;
1870 ctrl
->apst_enabled
= id
->apsta
;
1872 memcpy(ctrl
->psd
, id
->psd
, sizeof(ctrl
->psd
));
1874 if (ctrl
->ops
->flags
& NVME_F_FABRICS
) {
1875 ctrl
->icdoff
= le16_to_cpu(id
->icdoff
);
1876 ctrl
->ioccsz
= le32_to_cpu(id
->ioccsz
);
1877 ctrl
->iorcsz
= le32_to_cpu(id
->iorcsz
);
1878 ctrl
->maxcmd
= le16_to_cpu(id
->maxcmd
);
1881 * In fabrics we need to verify the cntlid matches the
1884 if (ctrl
->cntlid
!= le16_to_cpu(id
->cntlid
)) {
1889 if (!ctrl
->opts
->discovery_nqn
&& !ctrl
->kas
) {
1890 dev_err(ctrl
->device
,
1891 "keep-alive support is mandatory for fabrics\n");
1896 ctrl
->cntlid
= le16_to_cpu(id
->cntlid
);
1897 ctrl
->hmpre
= le32_to_cpu(id
->hmpre
);
1898 ctrl
->hmmin
= le32_to_cpu(id
->hmmin
);
1899 ctrl
->hmminds
= le32_to_cpu(id
->hmminds
);
1900 ctrl
->hmmaxd
= le16_to_cpu(id
->hmmaxd
);
1905 if (ctrl
->apst_enabled
&& !prev_apst_enabled
)
1906 dev_pm_qos_expose_latency_tolerance(ctrl
->device
);
1907 else if (!ctrl
->apst_enabled
&& prev_apst_enabled
)
1908 dev_pm_qos_hide_latency_tolerance(ctrl
->device
);
1910 ret
= nvme_configure_apst(ctrl
);
1914 ret
= nvme_configure_timestamp(ctrl
);
1918 ret
= nvme_configure_directives(ctrl
);
1922 ctrl
->identified
= true;
1930 EXPORT_SYMBOL_GPL(nvme_init_identify
);
1932 static int nvme_dev_open(struct inode
*inode
, struct file
*file
)
1934 struct nvme_ctrl
*ctrl
;
1935 int instance
= iminor(inode
);
1938 spin_lock(&dev_list_lock
);
1939 list_for_each_entry(ctrl
, &nvme_ctrl_list
, node
) {
1940 if (ctrl
->instance
!= instance
)
1943 if (!ctrl
->admin_q
) {
1947 if (!kref_get_unless_zero(&ctrl
->kref
))
1949 file
->private_data
= ctrl
;
1953 spin_unlock(&dev_list_lock
);
1958 static int nvme_dev_release(struct inode
*inode
, struct file
*file
)
1960 nvme_put_ctrl(file
->private_data
);
1964 static int nvme_dev_user_cmd(struct nvme_ctrl
*ctrl
, void __user
*argp
)
1969 mutex_lock(&ctrl
->namespaces_mutex
);
1970 if (list_empty(&ctrl
->namespaces
)) {
1975 ns
= list_first_entry(&ctrl
->namespaces
, struct nvme_ns
, list
);
1976 if (ns
!= list_last_entry(&ctrl
->namespaces
, struct nvme_ns
, list
)) {
1977 dev_warn(ctrl
->device
,
1978 "NVME_IOCTL_IO_CMD not supported when multiple namespaces present!\n");
1983 dev_warn(ctrl
->device
,
1984 "using deprecated NVME_IOCTL_IO_CMD ioctl on the char device!\n");
1985 kref_get(&ns
->kref
);
1986 mutex_unlock(&ctrl
->namespaces_mutex
);
1988 ret
= nvme_user_cmd(ctrl
, ns
, argp
);
1993 mutex_unlock(&ctrl
->namespaces_mutex
);
1997 static long nvme_dev_ioctl(struct file
*file
, unsigned int cmd
,
2000 struct nvme_ctrl
*ctrl
= file
->private_data
;
2001 void __user
*argp
= (void __user
*)arg
;
2004 case NVME_IOCTL_ADMIN_CMD
:
2005 return nvme_user_cmd(ctrl
, NULL
, argp
);
2006 case NVME_IOCTL_IO_CMD
:
2007 return nvme_dev_user_cmd(ctrl
, argp
);
2008 case NVME_IOCTL_RESET
:
2009 dev_warn(ctrl
->device
, "resetting controller\n");
2010 return nvme_reset_ctrl_sync(ctrl
);
2011 case NVME_IOCTL_SUBSYS_RESET
:
2012 return nvme_reset_subsystem(ctrl
);
2013 case NVME_IOCTL_RESCAN
:
2014 nvme_queue_scan(ctrl
);
2021 static const struct file_operations nvme_dev_fops
= {
2022 .owner
= THIS_MODULE
,
2023 .open
= nvme_dev_open
,
2024 .release
= nvme_dev_release
,
2025 .unlocked_ioctl
= nvme_dev_ioctl
,
2026 .compat_ioctl
= nvme_dev_ioctl
,
2029 static ssize_t
nvme_sysfs_reset(struct device
*dev
,
2030 struct device_attribute
*attr
, const char *buf
,
2033 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2036 ret
= nvme_reset_ctrl_sync(ctrl
);
2041 static DEVICE_ATTR(reset_controller
, S_IWUSR
, NULL
, nvme_sysfs_reset
);
2043 static ssize_t
nvme_sysfs_rescan(struct device
*dev
,
2044 struct device_attribute
*attr
, const char *buf
,
2047 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2049 nvme_queue_scan(ctrl
);
2052 static DEVICE_ATTR(rescan_controller
, S_IWUSR
, NULL
, nvme_sysfs_rescan
);
2054 static ssize_t
wwid_show(struct device
*dev
, struct device_attribute
*attr
,
2057 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2058 struct nvme_ctrl
*ctrl
= ns
->ctrl
;
2059 int serial_len
= sizeof(ctrl
->serial
);
2060 int model_len
= sizeof(ctrl
->model
);
2062 if (!uuid_is_null(&ns
->uuid
))
2063 return sprintf(buf
, "uuid.%pU\n", &ns
->uuid
);
2065 if (memchr_inv(ns
->nguid
, 0, sizeof(ns
->nguid
)))
2066 return sprintf(buf
, "eui.%16phN\n", ns
->nguid
);
2068 if (memchr_inv(ns
->eui
, 0, sizeof(ns
->eui
)))
2069 return sprintf(buf
, "eui.%8phN\n", ns
->eui
);
2071 while (serial_len
> 0 && (ctrl
->serial
[serial_len
- 1] == ' ' ||
2072 ctrl
->serial
[serial_len
- 1] == '\0'))
2074 while (model_len
> 0 && (ctrl
->model
[model_len
- 1] == ' ' ||
2075 ctrl
->model
[model_len
- 1] == '\0'))
2078 return sprintf(buf
, "nvme.%04x-%*phN-%*phN-%08x\n", ctrl
->vid
,
2079 serial_len
, ctrl
->serial
, model_len
, ctrl
->model
, ns
->ns_id
);
2081 static DEVICE_ATTR(wwid
, S_IRUGO
, wwid_show
, NULL
);
2083 static ssize_t
nguid_show(struct device
*dev
, struct device_attribute
*attr
,
2086 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2087 return sprintf(buf
, "%pU\n", ns
->nguid
);
2089 static DEVICE_ATTR(nguid
, S_IRUGO
, nguid_show
, NULL
);
2091 static ssize_t
uuid_show(struct device
*dev
, struct device_attribute
*attr
,
2094 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2096 /* For backward compatibility expose the NGUID to userspace if
2097 * we have no UUID set
2099 if (uuid_is_null(&ns
->uuid
)) {
2100 printk_ratelimited(KERN_WARNING
2101 "No UUID available providing old NGUID\n");
2102 return sprintf(buf
, "%pU\n", ns
->nguid
);
2104 return sprintf(buf
, "%pU\n", &ns
->uuid
);
2106 static DEVICE_ATTR(uuid
, S_IRUGO
, uuid_show
, NULL
);
2108 static ssize_t
eui_show(struct device
*dev
, struct device_attribute
*attr
,
2111 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2112 return sprintf(buf
, "%8phd\n", ns
->eui
);
2114 static DEVICE_ATTR(eui
, S_IRUGO
, eui_show
, NULL
);
2116 static ssize_t
nsid_show(struct device
*dev
, struct device_attribute
*attr
,
2119 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2120 return sprintf(buf
, "%d\n", ns
->ns_id
);
2122 static DEVICE_ATTR(nsid
, S_IRUGO
, nsid_show
, NULL
);
2124 static struct attribute
*nvme_ns_attrs
[] = {
2125 &dev_attr_wwid
.attr
,
2126 &dev_attr_uuid
.attr
,
2127 &dev_attr_nguid
.attr
,
2129 &dev_attr_nsid
.attr
,
2133 static umode_t
nvme_ns_attrs_are_visible(struct kobject
*kobj
,
2134 struct attribute
*a
, int n
)
2136 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
2137 struct nvme_ns
*ns
= nvme_get_ns_from_dev(dev
);
2139 if (a
== &dev_attr_uuid
.attr
) {
2140 if (uuid_is_null(&ns
->uuid
) &&
2141 !memchr_inv(ns
->nguid
, 0, sizeof(ns
->nguid
)))
2144 if (a
== &dev_attr_nguid
.attr
) {
2145 if (!memchr_inv(ns
->nguid
, 0, sizeof(ns
->nguid
)))
2148 if (a
== &dev_attr_eui
.attr
) {
2149 if (!memchr_inv(ns
->eui
, 0, sizeof(ns
->eui
)))
2155 static const struct attribute_group nvme_ns_attr_group
= {
2156 .attrs
= nvme_ns_attrs
,
2157 .is_visible
= nvme_ns_attrs_are_visible
,
2160 #define nvme_show_str_function(field) \
2161 static ssize_t field##_show(struct device *dev, \
2162 struct device_attribute *attr, char *buf) \
2164 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
2165 return sprintf(buf, "%.*s\n", (int)sizeof(ctrl->field), ctrl->field); \
2167 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2169 #define nvme_show_int_function(field) \
2170 static ssize_t field##_show(struct device *dev, \
2171 struct device_attribute *attr, char *buf) \
2173 struct nvme_ctrl *ctrl = dev_get_drvdata(dev); \
2174 return sprintf(buf, "%d\n", ctrl->field); \
2176 static DEVICE_ATTR(field, S_IRUGO, field##_show, NULL);
2178 nvme_show_str_function(model
);
2179 nvme_show_str_function(serial
);
2180 nvme_show_str_function(firmware_rev
);
2181 nvme_show_int_function(cntlid
);
2183 static ssize_t
nvme_sysfs_delete(struct device
*dev
,
2184 struct device_attribute
*attr
, const char *buf
,
2187 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2189 if (device_remove_file_self(dev
, attr
))
2190 ctrl
->ops
->delete_ctrl(ctrl
);
2193 static DEVICE_ATTR(delete_controller
, S_IWUSR
, NULL
, nvme_sysfs_delete
);
2195 static ssize_t
nvme_sysfs_show_transport(struct device
*dev
,
2196 struct device_attribute
*attr
,
2199 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2201 return snprintf(buf
, PAGE_SIZE
, "%s\n", ctrl
->ops
->name
);
2203 static DEVICE_ATTR(transport
, S_IRUGO
, nvme_sysfs_show_transport
, NULL
);
2205 static ssize_t
nvme_sysfs_show_state(struct device
*dev
,
2206 struct device_attribute
*attr
,
2209 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2210 static const char *const state_name
[] = {
2211 [NVME_CTRL_NEW
] = "new",
2212 [NVME_CTRL_LIVE
] = "live",
2213 [NVME_CTRL_RESETTING
] = "resetting",
2214 [NVME_CTRL_RECONNECTING
]= "reconnecting",
2215 [NVME_CTRL_DELETING
] = "deleting",
2216 [NVME_CTRL_DEAD
] = "dead",
2219 if ((unsigned)ctrl
->state
< ARRAY_SIZE(state_name
) &&
2220 state_name
[ctrl
->state
])
2221 return sprintf(buf
, "%s\n", state_name
[ctrl
->state
]);
2223 return sprintf(buf
, "unknown state\n");
2226 static DEVICE_ATTR(state
, S_IRUGO
, nvme_sysfs_show_state
, NULL
);
2228 static ssize_t
nvme_sysfs_show_subsysnqn(struct device
*dev
,
2229 struct device_attribute
*attr
,
2232 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2234 return snprintf(buf
, PAGE_SIZE
, "%s\n", ctrl
->subnqn
);
2236 static DEVICE_ATTR(subsysnqn
, S_IRUGO
, nvme_sysfs_show_subsysnqn
, NULL
);
2238 static ssize_t
nvme_sysfs_show_address(struct device
*dev
,
2239 struct device_attribute
*attr
,
2242 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2244 return ctrl
->ops
->get_address(ctrl
, buf
, PAGE_SIZE
);
2246 static DEVICE_ATTR(address
, S_IRUGO
, nvme_sysfs_show_address
, NULL
);
2248 static struct attribute
*nvme_dev_attrs
[] = {
2249 &dev_attr_reset_controller
.attr
,
2250 &dev_attr_rescan_controller
.attr
,
2251 &dev_attr_model
.attr
,
2252 &dev_attr_serial
.attr
,
2253 &dev_attr_firmware_rev
.attr
,
2254 &dev_attr_cntlid
.attr
,
2255 &dev_attr_delete_controller
.attr
,
2256 &dev_attr_transport
.attr
,
2257 &dev_attr_subsysnqn
.attr
,
2258 &dev_attr_address
.attr
,
2259 &dev_attr_state
.attr
,
2263 static umode_t
nvme_dev_attrs_are_visible(struct kobject
*kobj
,
2264 struct attribute
*a
, int n
)
2266 struct device
*dev
= container_of(kobj
, struct device
, kobj
);
2267 struct nvme_ctrl
*ctrl
= dev_get_drvdata(dev
);
2269 if (a
== &dev_attr_delete_controller
.attr
&& !ctrl
->ops
->delete_ctrl
)
2271 if (a
== &dev_attr_address
.attr
&& !ctrl
->ops
->get_address
)
2277 static struct attribute_group nvme_dev_attrs_group
= {
2278 .attrs
= nvme_dev_attrs
,
2279 .is_visible
= nvme_dev_attrs_are_visible
,
2282 static const struct attribute_group
*nvme_dev_attr_groups
[] = {
2283 &nvme_dev_attrs_group
,
2287 static int ns_cmp(void *priv
, struct list_head
*a
, struct list_head
*b
)
2289 struct nvme_ns
*nsa
= container_of(a
, struct nvme_ns
, list
);
2290 struct nvme_ns
*nsb
= container_of(b
, struct nvme_ns
, list
);
2292 return nsa
->ns_id
- nsb
->ns_id
;
2295 static struct nvme_ns
*nvme_find_get_ns(struct nvme_ctrl
*ctrl
, unsigned nsid
)
2297 struct nvme_ns
*ns
, *ret
= NULL
;
2299 mutex_lock(&ctrl
->namespaces_mutex
);
2300 list_for_each_entry(ns
, &ctrl
->namespaces
, list
) {
2301 if (ns
->ns_id
== nsid
) {
2302 if (!kref_get_unless_zero(&ns
->kref
))
2307 if (ns
->ns_id
> nsid
)
2310 mutex_unlock(&ctrl
->namespaces_mutex
);
static int nvme_setup_streams_ns(struct nvme_ctrl *ctrl, struct nvme_ns *ns)
{
	struct streams_directive_params s;
	int ret;

	if (!ctrl->nr_streams)
		return 0;

	ret = nvme_get_stream_params(ctrl, &s, ns->ns_id);
	if (ret)
		return ret;

	ns->sws = le32_to_cpu(s.sws);
	ns->sgs = le16_to_cpu(s.sgs);

	if (ns->sws) {
		unsigned int bs = 1 << ns->lba_shift;

		blk_queue_io_min(ns->queue, bs * ns->sws);
		if (ns->sgs)
			blk_queue_io_opt(ns->queue, bs * ns->sws * ns->sgs);
	}

	return 0;
}
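/*
 * Allocate and register a new namespace: set up the blk-mq request queue,
 * identify the namespace, register a LightNVM target when the quirk applies,
 * then allocate the gendisk, add the namespace to the controller's list and
 * publish the disk (plus its sysfs identification group) to userspace.
 */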
static void nvme_alloc_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;
	struct gendisk *disk;
	struct nvme_id_ns *id;
	char disk_name[DISK_NAME_LEN];
	int node = dev_to_node(ctrl->dev);

	ns = kzalloc_node(sizeof(*ns), GFP_KERNEL, node);
	if (!ns)
		return;

	ns->instance = ida_simple_get(&ctrl->ns_ida, 1, 0, GFP_KERNEL);
	if (ns->instance < 0)
		goto out_free_ns;

	ns->queue = blk_mq_init_queue(ctrl->tagset);
	if (IS_ERR(ns->queue))
		goto out_release_instance;
	queue_flag_set_unlocked(QUEUE_FLAG_NONROT, ns->queue);
	ns->queue->queuedata = ns;
	ns->ctrl = ctrl;

	kref_init(&ns->kref);
	ns->ns_id = nsid;
	ns->lba_shift = 9; /* set to a default value for 512 until disk is validated */

	blk_queue_logical_block_size(ns->queue, 1 << ns->lba_shift);
	nvme_set_queue_limits(ctrl, ns->queue);
	nvme_setup_streams_ns(ctrl, ns);

	sprintf(disk_name, "nvme%dn%d", ctrl->instance, ns->instance);

	id = nvme_identify_ns(ctrl, nsid);
	if (!id)
		goto out_free_queue;

	if (id->ncap == 0)
		goto out_free_id;

	nvme_report_ns_ids(ctrl, ns->ns_id, id, ns->eui, ns->nguid, &ns->uuid);

	if ((ctrl->quirks & NVME_QUIRK_LIGHTNVM) && id->vs[0] == 0x1) {
		if (nvme_nvm_register(ns, disk_name, node)) {
			dev_warn(ctrl->device, "LightNVM init failure\n");
			goto out_free_id;
		}
	}

	disk = alloc_disk_node(0, node);
	if (!disk)
		goto out_free_id;

	disk->fops = &nvme_fops;
	disk->private_data = ns;
	disk->queue = ns->queue;
	disk->flags = GENHD_FL_EXT_DEVT;
	memcpy(disk->disk_name, disk_name, DISK_NAME_LEN);
	ns->disk = disk;

	__nvme_revalidate_disk(disk, id);

	mutex_lock(&ctrl->namespaces_mutex);
	list_add_tail(&ns->list, &ctrl->namespaces);
	mutex_unlock(&ctrl->namespaces_mutex);

	kref_get(&ctrl->kref);

	kfree(id);

	device_add_disk(ctrl->device, ns->disk);
	if (sysfs_create_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group))
		pr_warn("%s: failed to create sysfs group for identification\n",
			ns->disk->disk_name);
	if (ns->ndev && nvme_nvm_register_sysfs(ns))
		pr_warn("%s: failed to register lightnvm sysfs group for identification\n",
			ns->disk->disk_name);
	return;
out_free_id:
	kfree(id);
out_free_queue:
	blk_cleanup_queue(ns->queue);
out_release_instance:
	ida_simple_remove(&ctrl->ns_ida, ns->instance);
out_free_ns:
	kfree(ns);
}
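/*
 * Tear down a namespace.  NVME_NS_REMOVING guards against concurrent
 * removal; the gendisk and its sysfs groups are only torn down if the disk
 * was actually brought up (GENHD_FL_UP).
 */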
static void nvme_ns_remove(struct nvme_ns *ns)
{
	if (test_and_set_bit(NVME_NS_REMOVING, &ns->flags))
		return;

	if (ns->disk && ns->disk->flags & GENHD_FL_UP) {
		if (blk_get_integrity(ns->disk))
			blk_integrity_unregister(ns->disk);
		sysfs_remove_group(&disk_to_dev(ns->disk)->kobj,
					&nvme_ns_attr_group);
		if (ns->ndev)
			nvme_nvm_unregister_sysfs(ns);
		del_gendisk(ns->disk);
		blk_cleanup_queue(ns->queue);
	}

	mutex_lock(&ns->ctrl->namespaces_mutex);
	list_del_init(&ns->list);
	mutex_unlock(&ns->ctrl->namespaces_mutex);

	nvme_put_ns(ns);
}
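/*
 * (Re)validate a single NSID: if the namespace is already known, revalidate
 * its disk and remove it when revalidation fails; otherwise try to allocate
 * it as a new namespace.
 */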
static void nvme_validate_ns(struct nvme_ctrl *ctrl, unsigned nsid)
{
	struct nvme_ns *ns;

	ns = nvme_find_get_ns(ctrl, nsid);
	if (ns) {
		if (ns->disk && revalidate_disk(ns->disk))
			nvme_ns_remove(ns);
		nvme_put_ns(ns);
	} else
		nvme_alloc_ns(ctrl, nsid);
}
static void nvme_remove_invalid_namespaces(struct nvme_ctrl *ctrl,
					unsigned nsid)
{
	struct nvme_ns *ns, *next;

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list) {
		if (ns->ns_id > nsid)
			nvme_ns_remove(ns);
	}
}
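/*
 * Scan namespaces using the Identify "namespace list" command.  Each 4KB
 * page returned holds up to 1024 active NSIDs in ascending order; any
 * namespace whose ID falls between the previously seen NSID and the next
 * reported one has disappeared and is removed on the fly.
 */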
static int nvme_scan_ns_list(struct nvme_ctrl *ctrl, unsigned nn)
{
	struct nvme_ns *ns;
	__le32 *ns_list;
	unsigned i, j, nsid, prev = 0, num_lists = DIV_ROUND_UP(nn, 1024);
	int ret = 0;

	ns_list = kzalloc(0x1000, GFP_KERNEL);
	if (!ns_list)
		return -ENOMEM;

	for (i = 0; i < num_lists; i++) {
		ret = nvme_identify_ns_list(ctrl, prev, ns_list);
		if (ret)
			goto free;

		for (j = 0; j < min(nn, 1024U); j++) {
			nsid = le32_to_cpu(ns_list[j]);
			if (!nsid)
				goto out;

			nvme_validate_ns(ctrl, nsid);

			while (++prev < nsid) {
				ns = nvme_find_get_ns(ctrl, prev);
				if (ns) {
					nvme_ns_remove(ns);
					nvme_put_ns(ns);
				}
			}
		}
		nn -= j;
	}
out:
	nvme_remove_invalid_namespaces(ctrl, prev);
free:
	kfree(ns_list);
	return ret;
}
static void nvme_scan_ns_sequential(struct nvme_ctrl *ctrl, unsigned nn)
{
	unsigned i;

	for (i = 1; i <= nn; i++)
		nvme_validate_ns(ctrl, i);

	nvme_remove_invalid_namespaces(ctrl, nn);
}
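/*
 * Namespace scan worker.  Controllers reporting NVMe 1.1 or later use the
 * namespace-list scan unless the IDENTIFY_CNS quirk forces the sequential
 * fallback, which simply validates every NSID from 1 up to id->nn.
 */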
static void nvme_scan_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, scan_work);
	struct nvme_id_ctrl *id;
	unsigned nn;

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	if (nvme_identify_ctrl(ctrl, &id))
		return;

	nn = le32_to_cpu(id->nn);
	if (ctrl->vs >= NVME_VS(1, 1, 0) &&
	    !(ctrl->quirks & NVME_QUIRK_IDENTIFY_CNS)) {
		if (!nvme_scan_ns_list(ctrl, nn))
			goto done;
	}
	nvme_scan_ns_sequential(ctrl, nn);
done:
	mutex_lock(&ctrl->namespaces_mutex);
	list_sort(NULL, &ctrl->namespaces, ns_cmp);
	mutex_unlock(&ctrl->namespaces_mutex);
	kfree(id);
}
void nvme_queue_scan(struct nvme_ctrl *ctrl)
{
	/*
	 * Do not queue new scan work when a controller is reset during
	 * removal.
	 */
	if (ctrl->state == NVME_CTRL_LIVE)
		queue_work(nvme_wq, &ctrl->scan_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_scan);
/*
 * This function iterates the namespace list unlocked to allow recovery from
 * controller failure. It is up to the caller to ensure the namespace list is
 * not modified by scan work while this function is executing.
 */
void nvme_remove_namespaces(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns, *next;

	/*
	 * The dead state indicates the controller was not gracefully
	 * disconnected. In that case, we won't be able to flush any data while
	 * removing the namespaces' disks; fail all the queues now to avoid
	 * potentially having to clean up the failed sync later.
	 */
	if (ctrl->state == NVME_CTRL_DEAD)
		nvme_kill_queues(ctrl);

	list_for_each_entry_safe(ns, next, &ctrl->namespaces, list)
		nvme_ns_remove(ns);
}
EXPORT_SYMBOL_GPL(nvme_remove_namespaces);
static void nvme_async_event_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl =
		container_of(work, struct nvme_ctrl, async_event_work);

	spin_lock_irq(&ctrl->lock);
	while (ctrl->state == NVME_CTRL_LIVE && ctrl->event_limit > 0) {
		int aer_idx = --ctrl->event_limit;

		spin_unlock_irq(&ctrl->lock);
		ctrl->ops->submit_async_event(ctrl, aer_idx);
		spin_lock_irq(&ctrl->lock);
	}
	spin_unlock_irq(&ctrl->lock);
}
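/*
 * Check whether the controller reports "Processing Paused" (CSTS.PP) while
 * it is enabled, which is the case while a firmware activation is in
 * progress.
 */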
static bool nvme_ctrl_pp_status(struct nvme_ctrl *ctrl)
{
	u32 csts;

	if (ctrl->ops->reg_read32(ctrl, NVME_REG_CSTS, &csts))
		return false;

	if (csts == ~0)
		return false;

	return ((ctrl->ctrl_config & NVME_CC_ENABLE) && (csts & NVME_CSTS_PP));
}
static void nvme_get_fw_slot_info(struct nvme_ctrl *ctrl)
{
	struct nvme_command c = { };
	struct nvme_fw_slot_info_log *log;

	log = kmalloc(sizeof(*log), GFP_KERNEL);
	if (!log)
		return;

	c.common.opcode = nvme_admin_get_log_page;
	c.common.nsid = cpu_to_le32(NVME_NSID_ALL);
	c.common.cdw10[0] = nvme_get_log_dw10(NVME_LOG_FW_SLOT, sizeof(*log));

	if (nvme_submit_sync_cmd(ctrl->admin_q, &c, log, sizeof(*log)))
		dev_warn(ctrl->device,
				"Get FW SLOT INFO log error\n");
	kfree(log);
}
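/*
 * Firmware activation worker: quiesce I/O queues and poll CSTS.PP until the
 * controller finishes activating the new image, bounded by MTFA (reported in
 * 100ms units) when the controller provides one, or by the admin timeout
 * otherwise.  If the wait times out, the controller is reset instead.
 */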
static void nvme_fw_act_work(struct work_struct *work)
{
	struct nvme_ctrl *ctrl = container_of(work,
				struct nvme_ctrl, fw_act_work);
	unsigned long fw_act_timeout;

	if (ctrl->mtfa)
		fw_act_timeout = jiffies +
				msecs_to_jiffies(ctrl->mtfa * 100);
	else
		fw_act_timeout = jiffies +
				msecs_to_jiffies(admin_timeout * 1000);

	nvme_stop_queues(ctrl);
	while (nvme_ctrl_pp_status(ctrl)) {
		if (time_after(jiffies, fw_act_timeout)) {
			dev_warn(ctrl->device,
				"Fw activation timeout, reset controller\n");
			nvme_reset_ctrl(ctrl);
			break;
		}
		msleep(100);
	}

	if (ctrl->state != NVME_CTRL_LIVE)
		return;

	nvme_start_queues(ctrl);
	/* read FW slot information to clear the AER */
	nvme_get_fw_slot_info(ctrl);
}
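/*
 * Completion path for Asynchronous Event Requests.  Successful and aborted
 * AERs replenish the event budget and re-queue the submission work; for
 * successful events, the notice type then selects the follow-up action
 * (namespace rescan or firmware-activation handling).
 */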
void nvme_complete_async_event(struct nvme_ctrl *ctrl, __le16 status,
		union nvme_result *res)
{
	u32 result = le32_to_cpu(res->u32);
	bool done = true;

	switch (le16_to_cpu(status) >> 1) {
	case NVME_SC_SUCCESS:
		done = false;
		/*FALLTHRU*/
	case NVME_SC_ABORT_REQ:
		++ctrl->event_limit;
		if (ctrl->state == NVME_CTRL_LIVE)
			queue_work(nvme_wq, &ctrl->async_event_work);
		break;
	default:
		break;
	}

	if (done)
		return;

	switch (result & 0xff07) {
	case NVME_AER_NOTICE_NS_CHANGED:
		dev_info(ctrl->device, "rescanning\n");
		nvme_queue_scan(ctrl);
		break;
	case NVME_AER_NOTICE_FW_ACT_STARTING:
		queue_work(nvme_wq, &ctrl->fw_act_work);
		break;
	default:
		dev_warn(ctrl->device, "async event result %08x\n", result);
	}
}
EXPORT_SYMBOL_GPL(nvme_complete_async_event);
void nvme_queue_async_events(struct nvme_ctrl *ctrl)
{
	ctrl->event_limit = NVME_NR_AERS;
	queue_work(nvme_wq, &ctrl->async_event_work);
}
EXPORT_SYMBOL_GPL(nvme_queue_async_events);

static DEFINE_IDA(nvme_instance_ida);
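/*
 * Controller instance numbers ("nvme0", "nvme1", ...) come from an IDA.
 * ida_pre_get()/ida_get_new() can race, so retry while the allocation
 * reports -EAGAIN; dev_list_lock serialises the actual ID assignment.
 */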
static int nvme_set_instance(struct nvme_ctrl *ctrl)
{
	int instance, error;

	do {
		if (!ida_pre_get(&nvme_instance_ida, GFP_KERNEL))
			return -ENODEV;

		spin_lock(&dev_list_lock);
		error = ida_get_new(&nvme_instance_ida, &instance);
		spin_unlock(&dev_list_lock);
	} while (error == -EAGAIN);

	if (error)
		return -ENODEV;

	ctrl->instance = instance;
	return 0;
}

static void nvme_release_instance(struct nvme_ctrl *ctrl)
{
	spin_lock(&dev_list_lock);
	ida_remove(&nvme_instance_ida, ctrl->instance);
	spin_unlock(&dev_list_lock);
}
void nvme_stop_ctrl(struct nvme_ctrl *ctrl)
{
	nvme_stop_keep_alive(ctrl);
	flush_work(&ctrl->async_event_work);
	flush_work(&ctrl->scan_work);
	cancel_work_sync(&ctrl->fw_act_work);
}
EXPORT_SYMBOL_GPL(nvme_stop_ctrl);

void nvme_start_ctrl(struct nvme_ctrl *ctrl)
{
	if (ctrl->kato)
		nvme_start_keep_alive(ctrl);

	if (ctrl->queue_count > 1) {
		nvme_queue_scan(ctrl);
		nvme_queue_async_events(ctrl);
		nvme_start_queues(ctrl);
	}
}
EXPORT_SYMBOL_GPL(nvme_start_ctrl);
void nvme_uninit_ctrl(struct nvme_ctrl *ctrl)
{
	device_destroy(nvme_class, MKDEV(nvme_char_major, ctrl->instance));

	spin_lock(&dev_list_lock);
	list_del(&ctrl->node);
	spin_unlock(&dev_list_lock);
}
EXPORT_SYMBOL_GPL(nvme_uninit_ctrl);

static void nvme_free_ctrl(struct kref *kref)
{
	struct nvme_ctrl *ctrl = container_of(kref, struct nvme_ctrl, kref);

	put_device(ctrl->device);
	nvme_release_instance(ctrl);
	ida_destroy(&ctrl->ns_ida);

	ctrl->ops->free_ctrl(ctrl);
}

void nvme_put_ctrl(struct nvme_ctrl *ctrl)
{
	kref_put(&ctrl->kref, nvme_free_ctrl);
}
EXPORT_SYMBOL_GPL(nvme_put_ctrl);
/*
 * Initialize an NVMe controller structure.  This needs to be called during
 * the earliest initialization so that we have the initialized structure
 * around during probing.
 */
int nvme_init_ctrl(struct nvme_ctrl *ctrl, struct device *dev,
		const struct nvme_ctrl_ops *ops, unsigned long quirks)
{
	int ret;

	ctrl->state = NVME_CTRL_NEW;
	spin_lock_init(&ctrl->lock);
	INIT_LIST_HEAD(&ctrl->namespaces);
	mutex_init(&ctrl->namespaces_mutex);
	kref_init(&ctrl->kref);
	ctrl->dev = dev;
	ctrl->ops = ops;
	ctrl->quirks = quirks;
	INIT_WORK(&ctrl->scan_work, nvme_scan_work);
	INIT_WORK(&ctrl->async_event_work, nvme_async_event_work);
	INIT_WORK(&ctrl->fw_act_work, nvme_fw_act_work);

	ret = nvme_set_instance(ctrl);
	if (ret)
		goto out;

	ctrl->device = device_create_with_groups(nvme_class, ctrl->dev,
				MKDEV(nvme_char_major, ctrl->instance),
				ctrl, nvme_dev_attr_groups,
				"nvme%d", ctrl->instance);
	if (IS_ERR(ctrl->device)) {
		ret = PTR_ERR(ctrl->device);
		goto out_release_instance;
	}
	get_device(ctrl->device);
	ida_init(&ctrl->ns_ida);

	spin_lock(&dev_list_lock);
	list_add_tail(&ctrl->node, &nvme_ctrl_list);
	spin_unlock(&dev_list_lock);

	/*
	 * Initialize latency tolerance controls.  The sysfs files won't
	 * be visible to userspace unless the device actually supports APST.
	 */
	ctrl->device->power.set_latency_tolerance = nvme_set_latency_tolerance;
	dev_pm_qos_update_user_latency_tolerance(ctrl->device,
		min(default_ps_max_latency_us, (unsigned long)S32_MAX));

	return 0;
out_release_instance:
	nvme_release_instance(ctrl);
out:
	return ret;
}
EXPORT_SYMBOL_GPL(nvme_init_ctrl);
/**
 * nvme_kill_queues(): Ends all namespace queues
 * @ctrl: the dead controller that needs to end
 *
 * Call this function when the driver determines it is unable to get the
 * controller in a state capable of servicing IO.
 */
void nvme_kill_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);

	/* Forcibly unquiesce queues to avoid blocking dispatch */
	if (ctrl->admin_q)
		blk_mq_unquiesce_queue(ctrl->admin_q);

	list_for_each_entry(ns, &ctrl->namespaces, list) {
		/*
		 * Revalidating a dead namespace sets capacity to 0. This will
		 * end buffered writers dirtying pages that can't be synced.
		 */
		if (!ns->disk || test_and_set_bit(NVME_NS_DEAD, &ns->flags))
			continue;
		revalidate_disk(ns->disk);
		blk_set_queue_dying(ns->queue);

		/* Forcibly unquiesce queues to avoid blocking dispatch */
		blk_mq_unquiesce_queue(ns->queue);
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_kill_queues);
void nvme_unfreeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unfreeze_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_unfreeze);
void nvme_wait_freeze_timeout(struct nvme_ctrl *ctrl, long timeout)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list) {
		timeout = blk_mq_freeze_queue_wait_timeout(ns->queue, timeout);
		if (timeout <= 0)
			break;
	}
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze_timeout);
void nvme_wait_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_freeze_queue_wait(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_wait_freeze);
void nvme_start_freeze(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_freeze_queue_start(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_freeze);
void nvme_stop_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_quiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_stop_queues);

void nvme_start_queues(struct nvme_ctrl *ctrl)
{
	struct nvme_ns *ns;

	mutex_lock(&ctrl->namespaces_mutex);
	list_for_each_entry(ns, &ctrl->namespaces, list)
		blk_mq_unquiesce_queue(ns->queue);
	mutex_unlock(&ctrl->namespaces_mutex);
}
EXPORT_SYMBOL_GPL(nvme_start_queues);
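/*
 * Module init/exit: bring up the shared nvme workqueue, the character-device
 * region used for the per-controller nodes, and the "nvme" class, in that
 * order, and unwind them in reverse on failure or module unload.
 */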
int __init nvme_core_init(void)
{
	int result;

	nvme_wq = alloc_workqueue("nvme-wq",
			WQ_UNBOUND | WQ_MEM_RECLAIM | WQ_SYSFS, 0);
	if (!nvme_wq)
		return -ENOMEM;

	result = __register_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme",
							&nvme_dev_fops);
	if (result < 0)
		goto destroy_wq;
	else if (result > 0)
		nvme_char_major = result;

	nvme_class = class_create(THIS_MODULE, "nvme");
	if (IS_ERR(nvme_class)) {
		result = PTR_ERR(nvme_class);
		goto unregister_chrdev;
	}

	return 0;

unregister_chrdev:
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
destroy_wq:
	destroy_workqueue(nvme_wq);
	return result;
}
void nvme_core_exit(void)
{
	class_destroy(nvme_class);
	__unregister_chrdev(nvme_char_major, 0, NVME_MINORS, "nvme");
	destroy_workqueue(nvme_wq);
}

MODULE_LICENSE("GPL");
MODULE_VERSION("1.0");
module_init(nvme_core_init);
module_exit(nvme_core_exit);