/*
 * Definitions for the NVM Express interface
 * Copyright (c) 2011, Intel Corporation.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms and conditions of the GNU General Public License,
 * version 2, as published by the Free Software Foundation.
 *
 * This program is distributed in the hope it will be useful, but WITHOUT
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
 * more details.
 *
 * You should have received a copy of the GNU General Public License along with
 * this program; if not, write to the Free Software Foundation, Inc.,
 * 51 Franklin St - Fifth Floor, Boston, MA 02110-1301 USA.
 */

#ifndef _LINUX_NVME_H
#define _LINUX_NVME_H

#include <linux/types.h>

struct nvme_bar {
	__u64 cap;	/* Controller Capabilities */
	__u32 vs;	/* Version */
	__u32 intms;	/* Interrupt Mask Set */
	__u32 intmc;	/* Interrupt Mask Clear */
	__u32 cc;	/* Controller Configuration */
	__u32 rsvd1;	/* Reserved */
	__u32 csts;	/* Controller Status */
	__u32 rsvd2;	/* Reserved */
	__u32 aqa;	/* Admin Queue Attributes */
	__u64 asq;	/* Admin SQ Base Address */
	__u64 acq;	/* Admin CQ Base Address */
};

#define NVME_CAP_MQES(cap)	((cap) & 0xffff)
#define NVME_CAP_TIMEOUT(cap)	(((cap) >> 24) & 0xff)
#define NVME_CAP_STRIDE(cap)	(((cap) >> 32) & 0xf)
#define NVME_CAP_MPSMIN(cap)	(((cap) >> 48) & 0xf)

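/*
 * Illustrative sketch (not part of the ABI): decoding a few CAP fields from
 * an ioremap()ed register window.  "bar" is an assumed pointer to the mapped
 * struct nvme_bar, and readq() availability is assumed.
 *
 *	u64 cap = readq(&bar->cap);
 *	unsigned queue_entries = NVME_CAP_MQES(cap) + 1;	// MQES is 0's based
 *	unsigned timeout_ms = NVME_CAP_TIMEOUT(cap) * 500;	// CAP.TO is in 500ms units
 *	unsigned db_stride = 4 << NVME_CAP_STRIDE(cap);		// doorbell stride in bytes
 */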
enum {
	NVME_CC_ENABLE = 1 << 0,
	NVME_CC_CSS_NVM = 0 << 4,
	NVME_CC_MPS_SHIFT = 7,
	NVME_CC_ARB_RR = 0 << 11,
	NVME_CC_ARB_WRRU = 1 << 11,
	NVME_CC_ARB_VS = 7 << 11,
	NVME_CC_SHN_NONE = 0 << 14,
	NVME_CC_SHN_NORMAL = 1 << 14,
	NVME_CC_SHN_ABRUPT = 2 << 14,
	NVME_CC_IOSQES = 6 << 16,
	NVME_CC_IOCQES = 4 << 20,
	NVME_CSTS_RDY = 1 << 0,
	NVME_CSTS_CFS = 1 << 1,
	NVME_CSTS_SHST_NORMAL = 0 << 2,
	NVME_CSTS_SHST_OCCUR = 1 << 2,
	NVME_CSTS_SHST_CMPLT = 2 << 2,
};

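/*
 * Illustrative sketch: roughly how a driver composes the CC register and
 * waits for the controller to become ready.  "bar" and the PAGE_SHIFT
 * handling are assumptions of this sketch, not requirements of the header.
 *
 *	u32 ctrl_config = NVME_CC_ENABLE | NVME_CC_CSS_NVM;
 *	ctrl_config |= (PAGE_SHIFT - 12) << NVME_CC_MPS_SHIFT;
 *	ctrl_config |= NVME_CC_ARB_RR | NVME_CC_SHN_NONE;
 *	ctrl_config |= NVME_CC_IOSQES | NVME_CC_IOCQES;
 *	writel(ctrl_config, &bar->cc);
 *	while (!(readl(&bar->csts) & NVME_CSTS_RDY))
 *		msleep(100);
 */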
struct nvme_id_power_state {
	__le16 max_power;	/* centiwatts */
	__u16 rsvd2;
	__le32 entry_lat;	/* microseconds */
	__le32 exit_lat;	/* microseconds */
	__u8 read_tput;
	__u8 read_lat;
	__u8 write_tput;
	__u8 write_lat;
	__u8 rsvd16[16];
};

#define NVME_VS(major, minor)	(((major) << 16) | (minor))

struct nvme_id_ctrl {
	__le16 vid;
	__le16 ssvid;
	char sn[20];
	char mn[40];
	char fr[8];
	__u8 rab;
	__u8 ieee[3];
	__u8 mic;
	__u8 mdts;
	__u8 rsvd78[178];
	__le16 oacs;
	__u8 acl;
	__u8 aerl;
	__u8 frmw;
	__u8 lpa;
	__u8 elpe;
	__u8 npss;
	__u8 rsvd264[248];
	__u8 sqes;
	__u8 cqes;
	__u8 rsvd514[2];
	__le32 nn;
	__le16 oncs;
	__le16 fuses;
	__u8 fna;
	__u8 vwc;
	__le16 awun;
	__le16 awupf;
	__u8 rsvd530[1518];
	struct nvme_id_power_state psd[32];
	__u8 vs[1024];
};

enum {
	NVME_CTRL_ONCS_COMPARE = 1 << 0,
	NVME_CTRL_ONCS_WRITE_UNCORRECTABLE = 1 << 1,
	NVME_CTRL_ONCS_DSM = 1 << 2,
};

struct nvme_lbaf {
	__le16 ms;
	__u8 ds;
	__u8 rp;
};

struct nvme_id_ns {
	__le64 nsze;
	__le64 ncap;
	__le64 nuse;
	__u8 nsfeat;
	__u8 nlbaf;
	__u8 flbas;
	__u8 mc;
	__u8 dpc;
	__u8 dps;
	__u8 rsvd30[98];
	struct nvme_lbaf lbaf[16];
	__u8 rsvd192[192];
	__u8 vs[3712];
};

enum {
	NVME_NS_FEAT_THIN = 1 << 0,
	NVME_LBAF_RP_BEST = 0,
	NVME_LBAF_RP_BETTER = 1,
	NVME_LBAF_RP_GOOD = 2,
	NVME_LBAF_RP_DEGRADED = 3,
};

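/*
 * Illustrative sketch: the low four bits of flbas select the active entry
 * in lbaf[], whose "ds" field is log2 of the LBA data size.  "id" is an
 * assumed pointer to an Identify Namespace result.
 *
 *	int fmt = id->flbas & 0xf;
 *	unsigned lba_shift = id->lbaf[fmt].ds;
 *	unsigned block_size = 1 << lba_shift;	// e.g. ds == 9 -> 512 bytes
 */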
struct nvme_smart_log {
	__u8 critical_warning;
	__u8 temperature[2];
	__u8 avail_spare;
	__u8 spare_thresh;
	__u8 percent_used;
	__u8 rsvd6[26];
	__u8 data_units_read[16];
	__u8 data_units_written[16];
	__u8 host_reads[16];
	__u8 host_writes[16];
	__u8 ctrl_busy_time[16];
	__u8 power_cycles[16];
	__u8 power_on_hours[16];
	__u8 unsafe_shutdowns[16];
	__u8 media_errors[16];
	__u8 num_err_log_entries[16];
	__u8 rsvd192[320];
};

enum {
	NVME_SMART_CRIT_SPARE = 1 << 0,
	NVME_SMART_CRIT_TEMPERATURE = 1 << 1,
	NVME_SMART_CRIT_RELIABILITY = 1 << 2,
	NVME_SMART_CRIT_MEDIA = 1 << 3,
	NVME_SMART_CRIT_VOLATILE_MEMORY = 1 << 4,
};

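/*
 * Illustrative sketch: per the NVMe specification the temperature field is
 * a 16-bit little-endian composite temperature in degrees Kelvin.  "log" is
 * an assumed pointer to a SMART/health log page (Get Log Page, LID 0x02).
 *
 *	int kelvin = log->temperature[0] | (log->temperature[1] << 8);
 *	int celsius = kelvin - 273;
 */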
struct nvme_lba_range_type {
	__u8 type;
	__u8 attributes;
	__u8 rsvd2[14];
	__u64 slba;
	__u64 nlb;
	__u8 guid[16];
	__u8 rsvd48[16];
};

enum {
	NVME_LBART_TYPE_FS = 0x01,
	NVME_LBART_TYPE_RAID = 0x02,
	NVME_LBART_TYPE_CACHE = 0x03,
	NVME_LBART_TYPE_SWAP = 0x04,

	NVME_LBART_ATTRIB_TEMP = 1 << 0,
	NVME_LBART_ATTRIB_HIDE = 1 << 1,
};

/* I/O commands */

enum nvme_opcode {
	nvme_cmd_flush = 0x00,
	nvme_cmd_write = 0x01,
	nvme_cmd_read = 0x02,
	nvme_cmd_write_uncor = 0x04,
	nvme_cmd_compare = 0x05,
	nvme_cmd_dsm = 0x09,
};

struct nvme_common_command {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__le32 cdw2[2];
	__le64 metadata;
	__le64 prp1;
	__le64 prp2;
	__le32 cdw10[6];
};

struct nvme_rw_command {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2;
	__le64 metadata;
	__le64 prp1;
	__le64 prp2;
	__le64 slba;
	__le16 length;
	__le16 control;
	__le32 dsmgmt;
	__le32 reftag;
	__le16 apptag;
	__le16 appmask;
};

enum {
	NVME_RW_LR = 1 << 15,
	NVME_RW_FUA = 1 << 14,
	NVME_RW_DSM_FREQ_UNSPEC = 0,
	NVME_RW_DSM_FREQ_TYPICAL = 1,
	NVME_RW_DSM_FREQ_RARE = 2,
	NVME_RW_DSM_FREQ_READS = 3,
	NVME_RW_DSM_FREQ_WRITES = 4,
	NVME_RW_DSM_FREQ_RW = 5,
	NVME_RW_DSM_FREQ_ONCE = 6,
	NVME_RW_DSM_FREQ_PREFETCH = 7,
	NVME_RW_DSM_FREQ_TEMP = 8,
	NVME_RW_DSM_LATENCY_NONE = 0 << 4,
	NVME_RW_DSM_LATENCY_IDLE = 1 << 4,
	NVME_RW_DSM_LATENCY_NORM = 2 << 4,
	NVME_RW_DSM_LATENCY_LOW = 3 << 4,
	NVME_RW_DSM_SEQ_REQ = 1 << 6,
	NVME_RW_DSM_COMPRESSED = 1 << 7,
};

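/*
 * Illustrative sketch of filling in a read; "cmdid", "nsid", "dma_addr",
 * "start_lba" and "nr_blocks" are placeholders.  Note that "length" is 0's
 * based (number of logical blocks minus one) and multi-byte fields are
 * little-endian on the wire.
 *
 *	struct nvme_rw_command rw = { };
 *	rw.opcode = nvme_cmd_read;
 *	rw.command_id = cmdid;
 *	rw.nsid = cpu_to_le32(nsid);
 *	rw.prp1 = cpu_to_le64(dma_addr);
 *	rw.slba = cpu_to_le64(start_lba);
 *	rw.length = cpu_to_le16(nr_blocks - 1);
 *	rw.control = cpu_to_le16(NVME_RW_FUA);	// optional: force unit access
 */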
struct nvme_dsm_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	__le64 prp1;
	__le64 prp2;
	__le32 nr;
	__le32 attributes;
	__u32 rsvd12[4];
};

enum {
	NVME_DSMGMT_IDR = 1 << 0,
	NVME_DSMGMT_IDW = 1 << 1,
	NVME_DSMGMT_AD = 1 << 2,
};

struct nvme_dsm_range {
	__le32 cattr;
	__le32 nlb;
	__le64 slba;
};

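/*
 * Illustrative sketch of a single-range deallocate (trim).  "range" is
 * assumed to live in a DMA-able buffer whose bus address is
 * "range_dma_addr"; "nsid", "start_lba" and "nr_blocks" are placeholders.
 * "nr" is 0's based, and prp1 points at the array of struct nvme_dsm_range.
 *
 *	range->cattr = cpu_to_le32(0);
 *	range->slba = cpu_to_le64(start_lba);
 *	range->nlb = cpu_to_le32(nr_blocks);
 *
 *	struct nvme_dsm_cmd dsm = { };
 *	dsm.opcode = nvme_cmd_dsm;
 *	dsm.nsid = cpu_to_le32(nsid);
 *	dsm.prp1 = cpu_to_le64(range_dma_addr);
 *	dsm.nr = 0;					// one range, 0's based
 *	dsm.attributes = cpu_to_le32(NVME_DSMGMT_AD);	// deallocate
 */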
/* Admin commands */

enum nvme_admin_opcode {
	nvme_admin_delete_sq = 0x00,
	nvme_admin_create_sq = 0x01,
	nvme_admin_get_log_page = 0x02,
	nvme_admin_delete_cq = 0x04,
	nvme_admin_create_cq = 0x05,
	nvme_admin_identify = 0x06,
	nvme_admin_abort_cmd = 0x08,
	nvme_admin_set_features = 0x09,
	nvme_admin_get_features = 0x0a,
	nvme_admin_async_event = 0x0c,
	nvme_admin_activate_fw = 0x10,
	nvme_admin_download_fw = 0x11,
	nvme_admin_format_nvm = 0x80,
	nvme_admin_security_send = 0x81,
	nvme_admin_security_recv = 0x82,
};

enum {
	NVME_QUEUE_PHYS_CONTIG = (1 << 0),
	NVME_CQ_IRQ_ENABLED = (1 << 1),
	NVME_SQ_PRIO_URGENT = (0 << 1),
	NVME_SQ_PRIO_HIGH = (1 << 1),
	NVME_SQ_PRIO_MEDIUM = (2 << 1),
	NVME_SQ_PRIO_LOW = (3 << 1),
	NVME_FEAT_ARBITRATION = 0x01,
	NVME_FEAT_POWER_MGMT = 0x02,
	NVME_FEAT_LBA_RANGE = 0x03,
	NVME_FEAT_TEMP_THRESH = 0x04,
	NVME_FEAT_ERR_RECOVERY = 0x05,
	NVME_FEAT_VOLATILE_WC = 0x06,
	NVME_FEAT_NUM_QUEUES = 0x07,
	NVME_FEAT_IRQ_COALESCE = 0x08,
	NVME_FEAT_IRQ_CONFIG = 0x09,
	NVME_FEAT_WRITE_ATOMIC = 0x0a,
	NVME_FEAT_ASYNC_EVENT = 0x0b,
	NVME_FEAT_SW_PROGRESS = 0x0c,
	NVME_FWACT_REPL = (0 << 3),
	NVME_FWACT_REPL_ACTV = (1 << 3),
	NVME_FWACT_ACTV = (2 << 3),
};

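/*
 * Illustrative sketch: negotiating I/O queue counts with Set Features.
 * Both counts are 0's based and share command dword 11; the controller
 * answers in the completion's result field.  "dev" and "nr_io_queues" are
 * placeholders; nvme_set_features() is declared further down in this file.
 *
 *	u32 result;
 *	u32 q_count = (nr_io_queues - 1) | ((nr_io_queues - 1) << 16);
 *	nvme_set_features(dev, NVME_FEAT_NUM_QUEUES, q_count, 0, &result);
 *	nr_io_queues = min(result & 0xffff, result >> 16) + 1;
 */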
struct nvme_identify {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	__le64 prp1;
	__le64 prp2;
	__le32 cns;
	__u32 rsvd11[5];
};

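/*
 * Illustrative sketch: Identify with CNS 1 returns struct nvme_id_ctrl,
 * CNS 0 returns struct nvme_id_ns for "nsid".  The 4096-byte result is
 * written to the buffer addressed by prp1 ("dma_addr" is a placeholder).
 *
 *	struct nvme_identify id = { };
 *	id.opcode = nvme_admin_identify;
 *	id.nsid = cpu_to_le32(0);
 *	id.prp1 = cpu_to_le64(dma_addr);
 *	id.cns = cpu_to_le32(1);	// identify controller
 */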
struct nvme_features {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[2];
	__le64 prp1;
	__le64 prp2;
	__le32 fid;
	__le32 dword11;
	__u32 rsvd12[4];
};

struct nvme_create_cq {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	__le64 prp1;
	__u64 rsvd8;
	__le16 cqid;
	__le16 qsize;
	__le16 cq_flags;
	__le16 irq_vector;
	__u32 rsvd12[4];
};

struct nvme_create_sq {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	__le64 prp1;
	__u64 rsvd8;
	__le16 sqid;
	__le16 qsize;
	__le16 sq_flags;
	__le16 cqid;
	__u32 rsvd12[4];
};

struct nvme_delete_queue {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[9];
	__le16 qid;
	__u16 rsvd10;
	__u32 rsvd11[5];
};

struct nvme_download_firmware {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__u32 rsvd1[5];
	__le64 prp1;
	__le64 prp2;
	__le32 numd;
	__le32 offset;
	__u32 rsvd12[4];
};

struct nvme_format_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 command_id;
	__le32 nsid;
	__u64 rsvd2[4];
	__le32 cdw10;
	__u32 rsvd11[5];
};

struct nvme_command {
	union {
		struct nvme_common_command common;
		struct nvme_rw_command rw;
		struct nvme_identify identify;
		struct nvme_features features;
		struct nvme_create_cq create_cq;
		struct nvme_create_sq create_sq;
		struct nvme_delete_queue delete_queue;
		struct nvme_download_firmware dlfw;
		struct nvme_format_cmd format;
		struct nvme_dsm_cmd dsm;
	};
};

enum {
	NVME_SC_SUCCESS = 0x0,
	NVME_SC_INVALID_OPCODE = 0x1,
	NVME_SC_INVALID_FIELD = 0x2,
	NVME_SC_CMDID_CONFLICT = 0x3,
	NVME_SC_DATA_XFER_ERROR = 0x4,
	NVME_SC_POWER_LOSS = 0x5,
	NVME_SC_INTERNAL = 0x6,
	NVME_SC_ABORT_REQ = 0x7,
	NVME_SC_ABORT_QUEUE = 0x8,
	NVME_SC_FUSED_FAIL = 0x9,
	NVME_SC_FUSED_MISSING = 0xa,
	NVME_SC_INVALID_NS = 0xb,
	NVME_SC_CMD_SEQ_ERROR = 0xc,
	NVME_SC_LBA_RANGE = 0x80,
	NVME_SC_CAP_EXCEEDED = 0x81,
	NVME_SC_NS_NOT_READY = 0x82,
	NVME_SC_CQ_INVALID = 0x100,
	NVME_SC_QID_INVALID = 0x101,
	NVME_SC_QUEUE_SIZE = 0x102,
	NVME_SC_ABORT_LIMIT = 0x103,
	NVME_SC_ABORT_MISSING = 0x104,
	NVME_SC_ASYNC_LIMIT = 0x105,
	NVME_SC_FIRMWARE_SLOT = 0x106,
	NVME_SC_FIRMWARE_IMAGE = 0x107,
	NVME_SC_INVALID_VECTOR = 0x108,
	NVME_SC_INVALID_LOG_PAGE = 0x109,
	NVME_SC_INVALID_FORMAT = 0x10a,
	NVME_SC_BAD_ATTRIBUTES = 0x180,
	NVME_SC_WRITE_FAULT = 0x280,
	NVME_SC_READ_ERROR = 0x281,
	NVME_SC_GUARD_CHECK = 0x282,
	NVME_SC_APPTAG_CHECK = 0x283,
	NVME_SC_REFTAG_CHECK = 0x284,
	NVME_SC_COMPARE_FAILED = 0x285,
	NVME_SC_ACCESS_DENIED = 0x286,
};

struct nvme_completion {
	__le32 result;	/* Used by admin commands to return data */
	__u32 rsvd;
	__le16 sq_head;	/* how much of this queue may be reclaimed */
	__le16 sq_id;	/* submission queue that generated this entry */
	__u16 command_id;	/* of the command which completed */
	__le16 status;	/* did the command fail, and if so, why? */
};

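/*
 * Illustrative sketch: bit 0 of "status" is the phase tag used to spot new
 * completion queue entries; the remaining bits hold a status code from the
 * enum above.  "cqe" is the entry being examined and "expected_phase" is a
 * placeholder tracked by the caller.
 *
 *	u16 status = le16_to_cpu(cqe.status);
 *	if ((status & 1) != expected_phase)
 *		return;			// entry not yet valid
 *	if ((status >> 1) != NVME_SC_SUCCESS)
 *		pr_err("command %d failed: 0x%x\n",
 *		       cqe.command_id, status >> 1);
 */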
struct nvme_user_io {
	__u8 opcode;
	__u8 flags;
	__u16 control;
	__u16 nblocks;
	__u16 rsvd;
	__u64 metadata;
	__u64 addr;
	__u64 slba;
	__u32 dsmgmt;
	__u32 reftag;
	__u16 apptag;
	__u16 appmask;
};

struct nvme_admin_cmd {
	__u8 opcode;
	__u8 flags;
	__u16 rsvd1;
	__u32 nsid;
	__u32 cdw2;
	__u32 cdw3;
	__u64 metadata;
	__u64 addr;
	__u32 metadata_len;
	__u32 data_len;
	__u32 cdw10;
	__u32 cdw11;
	__u32 cdw12;
	__u32 cdw13;
	__u32 cdw14;
	__u32 cdw15;
	__u32 timeout_ms;
	__u32 result;
};

#define NVME_IOCTL_ID		_IO('N', 0x40)
#define NVME_IOCTL_ADMIN_CMD	_IOWR('N', 0x41, struct nvme_admin_cmd)
#define NVME_IOCTL_SUBMIT_IO	_IOW('N', 0x42, struct nvme_user_io)

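/*
 * Illustrative sketch (userspace): issuing Identify Controller through a
 * namespace block device.  The device path is only an example, error
 * handling is omitted, and cdw10 carries the CNS value.
 *
 *	int fd = open("/dev/nvme0n1", O_RDONLY);
 *	int nsid = ioctl(fd, NVME_IOCTL_ID);
 *
 *	struct nvme_admin_cmd cmd = { };
 *	unsigned char data[4096];
 *	cmd.opcode = nvme_admin_identify;
 *	cmd.addr = (unsigned long)data;
 *	cmd.data_len = sizeof(data);
 *	cmd.cdw10 = 1;			// CNS 1: identify controller
 *	ioctl(fd, NVME_IOCTL_ADMIN_CMD, &cmd);
 */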
#ifdef __KERNEL__
#include <linux/pci.h>
#include <linux/miscdevice.h>
#include <linux/kref.h>

#define NVME_IO_TIMEOUT	(5 * HZ)

/*
 * Represents an NVM Express device. Each nvme_dev is a PCI function.
 */
struct nvme_dev {
	struct list_head node;
	struct nvme_queue **queues;
	u32 __iomem *dbs;
	struct pci_dev *pci_dev;
	struct dma_pool *prp_page_pool;
	struct dma_pool *prp_small_pool;
	int instance;
	int queue_count;
	int db_stride;
	u32 ctrl_config;
	struct msix_entry *entry;
	struct nvme_bar __iomem *bar;
	struct list_head namespaces;
	struct kref kref;
	struct miscdevice miscdev;
	char name[12];
	char serial[20];
	char model[40];
	char firmware_rev[8];
	u32 max_hw_sectors;
	u32 stripe_size;
	u16 oncs;
};

/*
 * An NVM Express namespace is equivalent to a SCSI LUN
 */
struct nvme_ns {
	struct list_head list;

	struct nvme_dev *dev;
	struct request_queue *queue;
	struct gendisk *disk;

	int ns_id;
	int lba_shift;
	int ms;
	u64 mode_select_num_blocks;
	u32 mode_select_block_len;
};

/*
 * The nvme_iod describes the data in an I/O, including the list of PRP
 * entries. You can't see it in this data structure because C doesn't let
 * me express that. Use nvme_alloc_iod to ensure there's enough space
 * allocated to store the PRP list.
 */
struct nvme_iod {
	void *private;	/* For the use of the submitter of the I/O */
	int npages;	/* In the PRP list. 0 means small pool in use */
	int offset;	/* Of PRP list */
	int nents;	/* Used in scatterlist */
	int length;	/* Of data, in bytes */
	dma_addr_t first_dma;
	struct scatterlist sg[0];
};

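/* Convert a 512-byte kernel sector number into a namespace logical block */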
static inline u64 nvme_block_nr(struct nvme_ns *ns, sector_t sector)
{
	return (sector >> (ns->lba_shift - 9));
}

/**
 * nvme_free_iod - frees an nvme_iod
 * @dev: The device that the I/O was submitted to
 * @iod: The memory to free
 */
void nvme_free_iod(struct nvme_dev *dev, struct nvme_iod *iod);

int nvme_setup_prps(struct nvme_dev *dev, struct nvme_common_command *cmd,
		struct nvme_iod *iod, int total_len, gfp_t gfp);
struct nvme_iod *nvme_map_user_pages(struct nvme_dev *dev, int write,
		unsigned long addr, unsigned length);
void nvme_unmap_user_pages(struct nvme_dev *dev, int write,
		struct nvme_iod *iod);
struct nvme_queue *get_nvmeq(struct nvme_dev *dev);
void put_nvmeq(struct nvme_queue *nvmeq);
int nvme_submit_sync_cmd(struct nvme_queue *nvmeq, struct nvme_command *cmd,
		u32 *result, unsigned timeout);
int nvme_submit_flush_data(struct nvme_queue *nvmeq, struct nvme_ns *ns);
int nvme_submit_admin_cmd(struct nvme_dev *, struct nvme_command *,
		u32 *result);
int nvme_identify(struct nvme_dev *, unsigned nsid, unsigned cns,
		dma_addr_t dma_addr);
int nvme_get_features(struct nvme_dev *dev, unsigned fid, unsigned nsid,
		dma_addr_t dma_addr, u32 *result);
int nvme_set_features(struct nvme_dev *dev, unsigned fid, unsigned dword11,
		dma_addr_t dma_addr, u32 *result);

struct sg_io_hdr;

int nvme_sg_io(struct nvme_ns *ns, struct sg_io_hdr __user *u_hdr);
int nvme_sg_get_version_num(int __user *ip);

#endif /* __KERNEL__ */

#endif /* _LINUX_NVME_H */