NVMe: Only clear the enable bit when disabling controller
include/linux/nvme.h
/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/types.h>

struct nvme_bar {
	__u64	cap;	/* Controller Capabilities */
	__u32	vs;	/* Version */
	__u32	intms;	/* Interrupt Mask Set */
	__u32	intmc;	/* Interrupt Mask Clear */
	__u32	cc;	/* Controller Configuration */
	__u32	rsvd1;	/* Reserved */
	__u32	csts;	/* Controller Status */
	__u32	rsvd2;	/* Reserved */
	__u32	aqa;	/* Admin Queue Attributes */
	__u64	asq;	/* Admin SQ Base Address */
	__u64	acq;	/* Admin CQ Base Address */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)

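/*
 * Illustrative sketch (not part of the original header): decoding a few CAP
 * fields with the macros above.  Per the NVMe spec, MQES is zero-based, TO is
 * in 500ms units, the doorbell stride is 2^(2 + DSTRD) bytes and the minimum
 * memory page size is 2^(12 + MPSMIN).  Reading the register with readq() is
 * an assumption (64-bit builds):
 *
 *	u64 cap = readq(&dev->bar->cap);
 *	unsigned max_queue_entries = NVME_CAP_MQES(cap) + 1;
 *	unsigned ready_timeout_ms  = NVME_CAP_TIMEOUT(cap) * 500;
 *	unsigned db_stride_bytes   = 4 << NVME_CAP_STRIDE(cap);
 *	unsigned min_page_size     = 1 << (12 + NVME_CAP_MPSMIN(cap));
 */
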
enum {
	NVME_CC_ENABLE		= 1 << 0,
	NVME_CC_CSS_NVM		= 0 << 4,
	NVME_CC_MPS_SHIFT	= 7,
	NVME_CC_ARB_RR		= 0 << 11,
	NVME_CC_ARB_WRRU	= 1 << 11,
	NVME_CC_ARB_VS		= 7 << 11,
	NVME_CC_SHN_NONE	= 0 << 14,
	NVME_CC_SHN_NORMAL	= 1 << 14,
	NVME_CC_SHN_ABRUPT	= 2 << 14,
	NVME_CC_IOSQES		= 6 << 16,
	NVME_CC_IOCQES		= 4 << 20,
	NVME_CSTS_RDY		= 1 << 0,
	NVME_CSTS_CFS		= 1 << 1,
	NVME_CSTS_SHST_NORMAL	= 0 << 2,
	NVME_CSTS_SHST_OCCUR	= 1 << 2,
	NVME_CSTS_SHST_CMPLT	= 2 << 2,
};
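
/*
 * Illustrative sketch (not part of the original header): how a CC value is
 * typically composed from the flags above, and how the commit this file was
 * viewed under ("only clear the enable bit when disabling controller")
 * suggests disabling is done.  Variable names are assumptions.
 *
 *	u32 ctrl_config = NVME_CC_CSS_NVM | NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
 *	ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
 *	ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 *
 *	writel(ctrl_config | NVME_CC_ENABLE, &bar->cc);		// enable
 *	writel(ctrl_config & ~NVME_CC_ENABLE, &bar->cc);	// disable: clear EN only
 *
 * After toggling EN, the driver is expected to poll CSTS until NVME_CSTS_RDY
 * matches the requested state.
 */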

struct nvme_id_power_state {
	__le16	max_power;	/* centiwatts */
	__u16	rsvd2;
	__le32	entry_lat;	/* microseconds */
	__le32	exit_lat;	/* microseconds */
	__u8	read_tput;
	__u8	read_lat;
	__u8	write_tput;
	__u8	write_lat;
	__u8	rsvd16[16];
};

#define NVME_VS(major, minor)	(major << 16 | minor)

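/*
 * Illustrative note (not part of the original header): NVME_VS packs a
 * version as (major << 16) | minor, so NVME_VS(1, 0) == 0x10000.  The major
 * version occupies the upper 16 bits of the VS BAR register, so a check such
 * as readl(&bar->vs) >= NVME_VS(1, 0) compares against the major version.
 */
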
struct nvme_id_ctrl {
	__le16	vid;
	__le16	ssvid;
	char	sn[20];
	char	mn[40];
	char	fr[8];
	__u8	rab;
	__u8	ieee[3];
	__u8	mic;
	__u8	mdts;
	__u8	rsvd78[178];
	__le16	oacs;
	__u8	acl;
	__u8	aerl;
	__u8	frmw;
	__u8	lpa;
	__u8	elpe;
	__u8	npss;
	__u8	rsvd264[248];
	__u8	sqes;
	__u8	cqes;
	__u8	rsvd514[2];
	__le32	nn;
	__le16	oncs;
	__le16	fuses;
	__u8	fna;
	__u8	vwc;
	__le16	awun;
	__le16	awupf;
	__u8	rsvd530[1518];
	struct nvme_id_power_state	psd[32];
	__u8	vs[1024];
};

enum {
	NVME_CTRL_ONCS_COMPARE			= 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE	= 1 << 1,
	NVME_CTRL_ONCS_DSM			= 1 << 2,
};

struct nvme_lbaf {
	__le16	ms;
	__u8	ds;
	__u8	rp;
};

struct nvme_id_ns {
	__le64	nsze;
	__le64	ncap;
	__le64	nuse;
	__u8	nsfeat;
	__u8	nlbaf;
	__u8	flbas;
	__u8	mc;
	__u8	dpc;
	__u8	dps;
	__u8	rsvd30[98];
	struct nvme_lbaf	lbaf[16];
	__u8	rsvd192[192];
	__u8	vs[3712];
};

enum {
	NVME_NS_FEAT_THIN	= 1 << 0,
	NVME_LBAF_RP_BEST	= 0,
	NVME_LBAF_RP_BETTER	= 1,
	NVME_LBAF_RP_GOOD	= 2,
	NVME_LBAF_RP_DEGRADED	= 3,
};

struct nvme_smart_log {
	__u8	critical_warning;
	__u8	temperature[2];
	__u8	avail_spare;
	__u8	spare_thresh;
	__u8	percent_used;
	__u8	rsvd6[26];
	__u8	data_units_read[16];
	__u8	data_units_written[16];
	__u8	host_reads[16];
	__u8	host_writes[16];
	__u8	ctrl_busy_time[16];
	__u8	power_cycles[16];
	__u8	power_on_hours[16];
	__u8	unsafe_shutdowns[16];
	__u8	media_errors[16];
	__u8	num_err_log_entries[16];
	__u8	rsvd192[320];
};

enum {
	NVME_SMART_CRIT_SPARE		= 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE	= 1 << 1,
	NVME_SMART_CRIT_RELIABILITY	= 1 << 2,
	NVME_SMART_CRIT_MEDIA		= 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY	= 1 << 4,
};

struct nvme_lba_range_type {
	__u8	type;
	__u8	attributes;
	__u8	rsvd2[14];
	__u64	slba;
	__u64	nlb;
	__u8	guid[16];
	__u8	rsvd48[16];
};

enum {
	NVME_LBART_TYPE_FS	= 0x01,
	NVME_LBART_TYPE_RAID	= 0x02,
	NVME_LBART_TYPE_CACHE	= 0x03,
	NVME_LBART_TYPE_SWAP	= 0x04,

	NVME_LBART_ATTRIB_TEMP	= 1 << 0,
	NVME_LBART_ATTRIB_HIDE	= 1 << 1,
};

/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush		= 0x00,
	nvme_cmd_write		= 0x01,
	nvme_cmd_read		= 0x02,
	nvme_cmd_write_uncor	= 0x04,
	nvme_cmd_compare	= 0x05,
	nvme_cmd_dsm		= 0x09,
};

struct nvme_common_command {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__le32	cdw2[2];
	__le64	metadata;
	__le64	prp1;
	__le64	prp2;
	__le32	cdw10[6];
};

struct nvme_rw_command {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2;
	__le64	metadata;
	__le64	prp1;
	__le64	prp2;
	__le64	slba;
	__le16	length;
	__le16	control;
	__le32	dsmgmt;
	__le32	reftag;
	__le16	apptag;
	__le16	appmask;
};

enum {
	NVME_RW_LR			= 1 << 15,
	NVME_RW_FUA			= 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC		= 0,
	NVME_RW_DSM_FREQ_TYPICAL	= 1,
	NVME_RW_DSM_FREQ_RARE		= 2,
	NVME_RW_DSM_FREQ_READS		= 3,
	NVME_RW_DSM_FREQ_WRITES		= 4,
	NVME_RW_DSM_FREQ_RW		= 5,
	NVME_RW_DSM_FREQ_ONCE		= 6,
	NVME_RW_DSM_FREQ_PREFETCH	= 7,
	NVME_RW_DSM_FREQ_TEMP		= 8,
	NVME_RW_DSM_LATENCY_NONE	= 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE	= 1 << 4,
	NVME_RW_DSM_LATENCY_NORM	= 2 << 4,
	NVME_RW_DSM_LATENCY_LOW		= 3 << 4,
	NVME_RW_DSM_SEQ_REQ		= 1 << 6,
	NVME_RW_DSM_COMPRESSED		= 1 << 7,
};
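
/*
 * Illustrative sketch (not part of the original header): the flags above land
 * in two different fields of struct nvme_rw_command.  NVME_RW_LR and
 * NVME_RW_FUA belong in "control"; the access-frequency/latency hints belong
 * in "dsmgmt".  The specific values chosen here are assumptions for the
 * example only.
 *
 *	struct nvme_rw_command rw = { .opcode = nvme_cmd_write };
 *	rw.control = cpu_to_le16(NVME_RW_FUA);
 *	rw.dsmgmt  = cpu_to_le32(NVME_RW_DSM_FREQ_ONCE | NVME_RW_DSM_LATENCY_IDLE);
 *	rw.slba    = cpu_to_le64(start_lba);
 *	rw.length  = cpu_to_le16(nlb - 1);	// zero-based block count
 */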

struct nvme_dsm_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[2];
	__le64	prp1;
	__le64	prp2;
	__le32	nr;
	__le32	attributes;
	__u32	rsvd12[4];
};

enum {
	NVME_DSMGMT_IDR	= 1 << 0,
	NVME_DSMGMT_IDW	= 1 << 1,
	NVME_DSMGMT_AD	= 1 << 2,
};

struct nvme_dsm_range {
	__le32	cattr;
	__le32	nlb;
	__le64	slba;
};
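
/*
 * Illustrative sketch (not part of the original header): a Dataset Management
 * (deallocate/TRIM) request is built from an array of nvme_dsm_range entries
 * pointed to by prp1, with "nr" holding the zero-based range count and
 * NVME_DSMGMT_AD set in "attributes".  Variable names are assumptions.
 *
 *	struct nvme_dsm_range *range = ...;	// DMA-able buffer
 *	range[0].cattr = cpu_to_le32(0);
 *	range[0].nlb   = cpu_to_le32(nlb);
 *	range[0].slba  = cpu_to_le64(slba);
 *
 *	struct nvme_dsm_cmd dsm = { .opcode = nvme_cmd_dsm };
 *	dsm.nsid       = cpu_to_le32(ns_id);
 *	dsm.prp1       = cpu_to_le64(range_dma_addr);
 *	dsm.nr         = cpu_to_le32(0);		// one range, zero-based
 *	dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);
 */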

/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq		= 0x00,
	nvme_admin_create_sq		= 0x01,
	nvme_admin_get_log_page		= 0x02,
	nvme_admin_delete_cq		= 0x04,
	nvme_admin_create_cq		= 0x05,
	nvme_admin_identify		= 0x06,
	nvme_admin_abort_cmd		= 0x08,
	nvme_admin_set_features		= 0x09,
	nvme_admin_get_features		= 0x0a,
	nvme_admin_async_event		= 0x0c,
	nvme_admin_activate_fw		= 0x10,
	nvme_admin_download_fw		= 0x11,
	nvme_admin_format_nvm		= 0x80,
	nvme_admin_security_send	= 0x81,
	nvme_admin_security_recv	= 0x82,
};

enum {
	NVME_QUEUE_PHYS_CONTIG	= (1 << 0),
	NVME_CQ_IRQ_ENABLED	= (1 << 1),
	NVME_SQ_PRIO_URGENT	= (0 << 1),
	NVME_SQ_PRIO_HIGH	= (1 << 1),
	NVME_SQ_PRIO_MEDIUM	= (2 << 1),
	NVME_SQ_PRIO_LOW	= (3 << 1),
	NVME_FEAT_ARBITRATION	= 0x01,
	NVME_FEAT_POWER_MGMT	= 0x02,
	NVME_FEAT_LBA_RANGE	= 0x03,
	NVME_FEAT_TEMP_THRESH	= 0x04,
	NVME_FEAT_ERR_RECOVERY	= 0x05,
	NVME_FEAT_VOLATILE_WC	= 0x06,
	NVME_FEAT_NUM_QUEUES	= 0x07,
	NVME_FEAT_IRQ_COALESCE	= 0x08,
	NVME_FEAT_IRQ_CONFIG	= 0x09,
	NVME_FEAT_WRITE_ATOMIC	= 0x0a,
	NVME_FEAT_ASYNC_EVENT	= 0x0b,
	NVME_FEAT_SW_PROGRESS	= 0x0c,
};

struct nvme_identify {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[2];
	__le64	prp1;
	__le64	prp2;
	__le32	cns;
	__u32	rsvd11[5];
};

struct nvme_features {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[2];
	__le64	prp1;
	__le64	prp2;
	__le32	fid;
	__le32	dword11;
	__u32	rsvd12[4];
};

struct nvme_create_cq {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[5];
	__le64	prp1;
	__u64	rsvd8;
	__le16	cqid;
	__le16	qsize;
	__le16	cq_flags;
	__le16	irq_vector;
	__u32	rsvd12[4];
};

struct nvme_create_sq {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[5];
	__le64	prp1;
	__u64	rsvd8;
	__le16	sqid;
	__le16	qsize;
	__le16	sq_flags;
	__le16	cqid;
	__u32	rsvd12[4];
};

struct nvme_delete_queue {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[9];
	__le16	qid;
	__u16	rsvd10;
	__u32	rsvd11[5];
};

struct nvme_download_firmware {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__u32	rsvd1[5];
	__le64	prp1;
	__le64	prp2;
	__le32	numd;
	__le32	offset;
	__u32	rsvd12[4];
};

struct nvme_format_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	command_id;
	__le32	nsid;
	__u64	rsvd2[4];
	__le32	cdw10;
	__u32	rsvd11[5];
};

struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
	};
};

enum {
	NVME_SC_SUCCESS			= 0x0,
	NVME_SC_INVALID_OPCODE		= 0x1,
	NVME_SC_INVALID_FIELD		= 0x2,
	NVME_SC_CMDID_CONFLICT		= 0x3,
	NVME_SC_DATA_XFER_ERROR		= 0x4,
	NVME_SC_POWER_LOSS		= 0x5,
	NVME_SC_INTERNAL		= 0x6,
	NVME_SC_ABORT_REQ		= 0x7,
	NVME_SC_ABORT_QUEUE		= 0x8,
	NVME_SC_FUSED_FAIL		= 0x9,
	NVME_SC_FUSED_MISSING		= 0xa,
	NVME_SC_INVALID_NS		= 0xb,
	NVME_SC_CMD_SEQ_ERROR		= 0xc,
	NVME_SC_LBA_RANGE		= 0x80,
	NVME_SC_CAP_EXCEEDED		= 0x81,
	NVME_SC_NS_NOT_READY		= 0x82,
	NVME_SC_CQ_INVALID		= 0x100,
	NVME_SC_QID_INVALID		= 0x101,
	NVME_SC_QUEUE_SIZE		= 0x102,
	NVME_SC_ABORT_LIMIT		= 0x103,
	NVME_SC_ABORT_MISSING		= 0x104,
	NVME_SC_ASYNC_LIMIT		= 0x105,
	NVME_SC_FIRMWARE_SLOT		= 0x106,
	NVME_SC_FIRMWARE_IMAGE		= 0x107,
	NVME_SC_INVALID_VECTOR		= 0x108,
	NVME_SC_INVALID_LOG_PAGE	= 0x109,
	NVME_SC_INVALID_FORMAT		= 0x10a,
	NVME_SC_BAD_ATTRIBUTES		= 0x180,
	NVME_SC_WRITE_FAULT		= 0x280,
	NVME_SC_READ_ERROR		= 0x281,
	NVME_SC_GUARD_CHECK		= 0x282,
	NVME_SC_APPTAG_CHECK		= 0x283,
	NVME_SC_REFTAG_CHECK		= 0x284,
	NVME_SC_COMPARE_FAILED		= 0x285,
	NVME_SC_ACCESS_DENIED		= 0x286,
};

struct nvme_completion {
	__le32	result;		/* Used by admin commands to return data */
	__u32	rsvd;
	__le16	sq_head;	/* how much of this queue may be reclaimed */
	__le16	sq_id;		/* submission queue that generated this entry */
	__u16	command_id;	/* of the command which completed */
	__le16	status;		/* did the command fail, and if so, why? */
};

struct nvme_user_io {
	__u8	opcode;
	__u8	flags;
	__u16	control;
	__u16	nblocks;
	__u16	rsvd;
	__u64	metadata;
	__u64	addr;
	__u64	slba;
	__u32	dsmgmt;
	__u32	reftag;
	__u16	apptag;
	__u16	appmask;
};

struct nvme_admin_cmd {
	__u8	opcode;
	__u8	flags;
	__u16	rsvd1;
	__u32	nsid;
	__u32	cdw2;
	__u32	cdw3;
	__u64	metadata;
	__u64	addr;
	__u32	metadata_len;
	__u32	data_len;
	__u32	cdw10;
	__u32	cdw11;
	__u32	cdw12;
	__u32	cdw13;
	__u32	cdw14;
	__u32	cdw15;
	__u32	timeout_ms;
	__u32	result;
};

#define NVME_IOCTL_ID		_IO('N', 0x40)
#define NVME_IOCTL_ADMIN_CMD	_IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)
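
/*
 * Illustrative user-space sketch (not part of the original header): issuing
 * an Identify Controller command through the admin ioctl on the per-device
 * character node (assumed to be /dev/nvme0 here); this typically requires
 * CAP_SYS_ADMIN.  Error checking is elided; cdw10 = 1 selects the controller
 * data structure.
 *
 *	struct nvme_admin_cmd cmd = { 0 };
 *	unsigned char data[4096];
 *
 *	cmd.opcode   = nvme_admin_identify;
 *	cmd.addr     = (unsigned long)data;
 *	cmd.data_len = sizeof(data);
 *	cmd.cdw10    = 1;			// CNS: identify controller
 *
 *	int fd = open("/dev/nvme0", O_RDWR);
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */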

#ifdef __KERNEL__
#include <linux/pci.h>
#include <linux/miscdevice.h>
#include <linux/kref.h>

#define NVME_IO_TIMEOUT	(5 * HZ)

/*
 * Represents an NVM Express device.  Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	int db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	struct kref kref;
	struct miscdevice miscdev;
	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
	int ms;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries.  You can't see it in this data structure because C doesn't let
 * me express that.  Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	void *private;		/* For the use of the submitter of the I/O */
	int npages;		/* In the PRP list. 0 means small pool in use */
	int offset;		/* Of PRP list */
	int nents;		/* Used in scatterlist */
	int length;		/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist sg[0];
};
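
/*
 * Illustrative note (not part of the original header): because "sg" is a
 * flexible array, an nvme_iod must be allocated with room for the scatterlist
 * (and the PRP list) appended to it, which is roughly what nvme_alloc_iod
 * does internally:
 *
 *	iod = kmalloc(sizeof(struct nvme_iod) +
 *			nseg * sizeof(struct scatterlist) +
 *			<space for the PRP list>, gfp);
 *
 * The exact PRP-list sizing is driver-internal and not shown here.
 */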

static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

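/*
 * Illustrative note (not part of the original header): nvme_block_nr converts
 * a 512-byte kernel sector into a namespace LBA.  For a namespace formatted
 * with 4096-byte blocks (lba_shift == 12), sector 8 maps to
 * LBA 8 >> (12 - 9) == 1.
 */
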
/**
 * nvme_free_iod - frees an nvme_iod
 * @dev: The device that the I/O was submitted to
 * @iod: The memory to free
 */
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);

int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
			struct nvme_iod *iod, int total_len, gfp_t gfp);
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
			unsigned long addr, unsigned length);
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
			struct nvme_iod *iod);
struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
void put_nvmeq(struct nvme_queue *nvmeq);
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
			u32 *result, unsigned timeout);
int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
			u32 *result);
int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
			dma_addr_t dma_addr);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
			dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
			dma_addr_t dma_addr, u32 *result);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_get_version_num(int __user *ip);

#endif

#endif /* _LINUX_NVME_H */