if (bio_flagged(bio_src, BIO_THROTTLED))
bio_set_flag(bio, BIO_THROTTLED);
bio->bi_opf = bio_src->bi_opf;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ bio->bi_iter.bi_dun = bio_src->bi_iter.bi_dun;
+#endif
bio->bi_write_hint = bio_src->bi_write_hint;
bio->bi_iter = bio_src->bi_iter;
bio->bi_io_vec = bio_src->bi_io_vec;
}
}
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ bio->bi_iter.bi_dun = bio_src->bi_iter.bi_dun;
+#endif
bio_clone_blkcg_association(bio, bio_src);
return bio;
else
req->ioprio = IOPRIO_PRIO_VALUE(IOPRIO_CLASS_NONE, 0);
req->write_hint = bio->bi_write_hint;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ req->__dun = bio->bi_iter.bi_dun;
+#endif
blk_rq_bio_prep(req->q, req, bio);
}
EXPORT_SYMBOL_GPL(blk_init_request_from_bio);
req->__data_len -= total_bytes;
/* update sector only for requests with clear definition of sector */
- if (!blk_rq_is_passthrough(req))
+ if (!blk_rq_is_passthrough(req)) {
req->__sector += total_bytes >> 9;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ if (req->__dun)
+ req->__dun += total_bytes >> 12;
+#endif
+ }
/* mixed attributes always follow the first bio */
if (req->rq_flags & RQF_MIXED_MERGE) {
dst->cpu = src->cpu;
dst->__sector = blk_rq_pos(src);
dst->__data_len = blk_rq_bytes(src);
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ dst->__dun = blk_rq_dun(src);
+#endif
if (src->rq_flags & RQF_SPECIAL_PAYLOAD) {
dst->rq_flags |= RQF_SPECIAL_PAYLOAD;
dst->special_vec = src->special_vec;
#include <linux/bio.h>
#include <linux/blkdev.h>
#include <linux/scatterlist.h>
+#include <crypto/diskcipher.h>
#include <trace/events/block.h>
return ELEVATOR_NO_MERGE;
}
+/*
+ * Decide whether two bios are compatible for merging with respect to
+ * inline disk encryption.  Without DUN support this only requires that
+ * both or neither bio carry a crypt context.  With DUN support, encrypted
+ * bios may merge only when they belong to the same inode and bio2's DUN
+ * stream directly continues bio1's (the ICE hardware consumes one
+ * consecutive IV stream per request).
+ */
+static inline bool blk_crypt_mergeable(struct bio *bio1, struct bio *bio2)
+{
+#ifndef CONFIG_CRYPTO_DISKCIPHER_DUN
+	/* Mergeable iff both or neither are encrypted. */
+	if (bio_has_crypt(bio1) == bio_has_crypt(bio2))
+		return true;
+#else
+	/* Both plain-text: always mergeable. */
+	if (!bio_has_crypt(bio1) && !bio_has_crypt(bio2))
+		return true;
+
+	/* Both encrypted: check owning inode and DUN adjacency. */
+	if (bio_has_crypt(bio1) == bio_has_crypt(bio2)) {
+		struct inode *inode1 = crypto_diskcipher_get_inode(bio1);
+		struct inode *inode2 = crypto_diskcipher_get_inode(bio2);
+
+		/* Missing or different owning inodes: never merge. */
+		if (!inode1 || inode1 != inode2)
+			return false;
+
+		/* Neither uses a DUN: plain sector-based IVs, safe to merge. */
+		if (!bio_dun(bio1) && !bio_dun(bio2))
+			return true;
+
+		/* Both use DUNs: merge only when bio2 continues bio1's stream. */
+		if (bio_dun(bio1) && bio_dun(bio2) &&
+		    bio_end_dun(bio1) == bio_dun(bio2))
+			return true;
+
+		/* One has a DUN, the other does not: fall through, no merge. */
+	}
+	/* Exactly one side encrypted: fall through, no merge. */
+#endif
+
+	return false;
+}
+
/*
* For non-mq, this has to be called with the request spinlock acquired.
* For mq with scheduling, the appropriate queue wide lock should be held.
enum elv_merge blk_try_merge(struct request *rq, struct bio *bio)
{
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ if (blk_rq_dun(rq) || bio_dun(bio))
+ return ELEVATOR_NO_MERGE;
+#endif
if (blk_discard_mergable(rq))
return ELEVATOR_DISCARD_MERGE;
else if (blk_rq_pos(rq) + blk_rq_sectors(rq) == bio->bi_iter.bi_sector)
Diskcipher support the crypt operation of the block host device
that has inline crypto engine.
+config CRYPTO_DISKCIPHER_DUN
+ bool "Diskcipher use dun"
+ default y
+ depends on CRYPTO_DISKCIPHER && F2FS_FS_ENCRYPTION
+ help
+	  Support use of a DUN (device unit number) as the IV for the
+	  diskcipher inline-crypto engine.
+
config CRYPTO_DISKCIPHER_DEBUG
bool "Diskcipher debug support"
default n
#include "internal.h"
-
#ifdef CONFIG_CRYPTO_DISKCIPHER_DEBUG
#include <crypto/fmp.h>
#include <linux/mm_types.h>
if (api <= DISKC_API_MAX)
dbg->cnt[api][bi_opf]++;
else {
- if (bi_opf & REQ_CRYPT)
+ if (bi_opf)
idx = 1;
dbg->cnt[api][idx]++;
}
{
int i;
struct diskc_debug_info *dbg = &diskc_dbg;
- char name[DISKC_USER_MAX][20]
- = {"alloc", "free", "freereq", "setkey", "set", "get",
- "crypt", "clear", "null", "page-io", "readpage", "dio",
- "blk_write", "zeropage", "bufferhead",
- "dmcrypt", "merge", "diskc_check_err", "fs_dec_warn",
- "fs_enc_warn", "diskc_merge_dio", "diskc_freereq_warn",
- "diskc_freewq_warn", "disk_crypt_warn"};
+ char name[DISKC_USER_MAX][32] = {
+ "ALLOC", "FREE", "FREEREQ", "SETKEY", "SET", "GET", "CRYPT", "CLEAR",
+ "DISKC_API_MAX", "FS_PAGEIO", "FS_READP", "FS_DIO", "FS_BLOCK_WRITE",
+ "FS_ZEROPAGE", "BLK_BH", "DMCRYPT", "DISKC_MERGE", "DISKC_MERGE_ERR_INODE", "DISKC_MERGE_ERR_DISK",
+ "FS_DEC_WARN", "FS_ENC_WARN", "DISKC_MERGE_DIO", "DISKC_FREE_REQ_WARN",
+ "DISKC_FREE_WQ_WARN", "DISKC_CRYPT_WARN",
+ "DM_CRYPT_NONENCRYPT", "DM_CRYPT_CTR", "DM_CRYPT_DTR", "DM_CRYPT_OVER",
+ "F2FS_gc", "F2FS_gc_data_page", "F2FS_gc_data_page_no_key", "F2FS_gc_data_page_no_key_FC",
+ "F2FS_gc_data_page_FC", "F2FS_gc_data_block", "F2FS_gc_data_block_key",
+ "F2FS_gc_data_block_err1", "F2FS_gc_data_block_err2", "F2FS_gc_data_block_err3", "F2FS_gc_skip",
+ "DISKC_ERR", "DISKC_NO_KEY_ERR", "DISKC_NO_SYNC_ERR", "DISKC_NO_CRYPT_ERR", "DISKC_NO_DISKC_ERR"};
for (i = 0; i < DISKC_USER_MAX; i++)
if (dbg->cnt[i][0] || dbg->cnt[i][1])
/* check diskcipher for FBE */
#define DISKC_FS_ENCRYPT_DEBUG
-void crypto_diskcipher_check(struct bio *bio)
-{
#ifdef DISKC_FS_ENCRYPT_DEBUG
+/*
+ * Verify that @bio's attached diskcipher is consistent with the crypt info
+ * of the inode owning the bio's first page.
+ * Returns 0 when consistent, -EINVAL on any mismatch.  The return type is
+ * int (not bool): the function stores -EINVAL in ret, and a bool return
+ * would silently collapse the errno to true.
+ */
+static int crypto_diskcipher_check(struct bio *bio)
+{
	int ret = 0;
	struct crypto_diskcipher *ci = NULL;
	struct inode *inode = NULL;
page = bio->bi_io_vec[0].bv_page;
if (page && !PageAnon(page) && bio)
if (page->mapping)
- if (page->mapping->host)
+ if (page->mapping->host) {
if (page->mapping->host->i_crypt_info) {
inode = page->mapping->host;
- ci = fscrypt_get_diskcipher(page->mapping->host);
+ ci = fscrypt_get_diskcipher(inode);
if (ci && (bio->bi_aux_private != ci)
&& (!(bio->bi_flags & REQ_OP_DISCARD))) {
+ pr_err("%s: no sync err\n", __func__);
dump_err(ci, DISKC_API_GET, bio, page);
- ret = 1;
- crypto_diskcipher_debug(DISKC_CHECK_ERR, 0);
+ crypto_diskcipher_debug(DISKC_NO_SYNC_ERR, 0);
+ ret = -EINVAL;
}
- if (!inode->i_crypt_info || !ci) {
- ret = 1;
- crypto_diskcipher_debug(DISKC_CHECK_ERR, 1);
+ if (!ci) {
+ crypto_diskcipher_debug(DISKC_NO_DISKC_ERR, 1);
+ pr_err("%s: no crypt err\n", __func__);
+ ret = -EINVAL;
}
+ } else {
+ crypto_diskcipher_debug(DISKC_NO_KEY_ERR, 1);
+ ret = -EINVAL;
}
+ }
out:
crypto_diskcipher_debug(DISKC_API_GET, ret);
-#endif
+ return ret;
}
#else
+/*
+ * Non-debug stub: must expand to an integer expression (0 == "no error"),
+ * because callers test the result, e.g. "if (!crypto_diskcipher_check(bio))".
+ * An expansion of ((void)0) would not compile in that context.
+ */
+#define crypto_diskcipher_check(a) (0)
+#endif
+#else
+#define crypto_diskcipher_check(a) (0)
#define disckipher_log_show(a) do { } while (0)
#endif
pr_err("%s: Invalid bio:%p\n", __func__, bio);
return NULL;
}
- if (bio->bi_opf & REQ_CRYPT)
- return bio->bi_aux_private;
- else
+
+ if (bio->bi_opf & REQ_CRYPT) {
+ if (bio->bi_aux_private) {
+ if (!crypto_diskcipher_check(bio))
+ return bio->bi_aux_private;
+ else
+ return ERR_PTR(-EINVAL);
+ } else {
+ crypto_diskcipher_debug(DISKC_NO_CRYPT_ERR, 0);
+ return ERR_PTR(-EINVAL);
+ }
+ }
+
+ return NULL;
+}
+
+/*
+ * Return the inode recorded on @bio's diskcipher tfm, or NULL when the bio
+ * is not encrypted.  Reads bi_aux_private directly (no consistency check),
+ * so it is cheap enough for merge decisions in the block layer.
+ */
+struct inode *crypto_diskcipher_get_inode(struct bio *bio)
+{
+	struct crypto_diskcipher *tfm;
+
+	/* Only encrypted bios carry a diskcipher in bi_aux_private. */
+	if (!(bio->bi_opf & REQ_CRYPT))
+		return NULL;
+
+	tfm = bio->bi_aux_private;
+	/* REQ_CRYPT set but no tfm attached: corrupt bio, don't dereference. */
+	if (!tfm)
+		return NULL;
+
+	return tfm->inode;
+}
-void crypto_diskcipher_set(struct bio *bio,
-			   struct crypto_diskcipher *tfm)
+/*
+ * Attach @tfm to @bio for inline-crypto processing and remember the owning
+ * @inode on the tfm (used later by blk_crypt_mergeable() via
+ * crypto_diskcipher_get_inode()).  A non-zero @dun seeds the bio's IV
+ * stream; dun == 0 means "no DUN" and leaves bi_dun untouched.
+ */
+void crypto_diskcipher_set(struct bio *bio, struct crypto_diskcipher *tfm,
+			   const struct inode *inode, u64 dun)
 {
 	if (bio && tfm) {
 		bio->bi_opf |= REQ_CRYPT;
 		bio->bi_aux_private = tfm;
+		/* NOTE(review): const is cast away and the inode is stored
+		 * mutable on the tfm — confirm no caller relies on @inode
+		 * remaining const after this call.
+		 */
+		tfm->inode = (struct inode *)inode;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+		if (dun)
+			bio->bi_iter.bi_dun = dun;
+#endif
 	}
 	crypto_diskcipher_debug(DISKC_API_SET, 0);
 }
DISKC_ST_FREE_REQ,
DISKC_ST_FREE,
};
+
int crypto_diskcipher_setkey(struct crypto_diskcipher *tfm, const char *in_key,
unsigned int key_len, bool persistent)
{
clone_init(io, clone);
if (crypt_mode_diskcipher(cc))
- crypto_diskcipher_set(clone, cc->cipher_tfm.tfms_diskc[0]);
+ crypto_diskcipher_set(clone, cc->cipher_tfm.tfms_diskc[0], NULL, 0);
clone->bi_iter.bi_sector = cc->start + io->sector;
int exynos_ufs_fmp_cfg(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp,
struct scatterlist *sg,
- uint32_t index, int sector_offset)
+ uint32_t index, int sector_offset, int page_index)
{
struct fmp_request req;
struct crypto_diskcipher *dtfm;
- sector_t iv;
+ u64 iv;
struct bio *bio = get_bio(hba, lrbp);
if (!bio)
return 0;
dtfm = crypto_diskcipher_get(bio);
- if (dtfm) {
+ if (unlikely(IS_ERR(dtfm))) {
+ pr_warn("%s: fails to get crypt\n", __func__);
+ return -EINVAL;
+ } else if (dtfm) {
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ if (bio_dun(bio))
+ iv = bio_dun(bio) + page_index;
+ else
+ iv = bio->bi_iter.bi_sector + (sector_t) sector_offset;
+#else
iv = bio->bi_iter.bi_sector + (sector_t) sector_offset;
+#endif
+
req.table = (void *)&lrbp->ucd_prdt_ptr[index];
req.cmdq_enabled = 0;
req.iv = &iv;
return 0;
}
#endif
- crypto_diskcipher_check(bio);
if (crypto_diskcipher_set_crypt(dtfm, &req)) {
pr_warn("%s: fails to set crypt\n", __func__);
return -EINVAL;
struct ufshcd_lrb *lrbp,
struct scatterlist *sg,
uint32_t index,
- int sector_offset);
+ int sector_offset, int page_index);
int exynos_ufs_fmp_clear(struct ufs_hba *hba, struct ufshcd_lrb *lrbp);
int exynos_ufs_fmp_sec_cfg(struct exynos_ufs *ufs);
#else
struct ufshcd_lrb *lrbp,
struct scatterlist *sg,
uint32_t index,
- int sector_offset)
+ int sector_offset, int page_index)
{
return 0;
}
static int exynos_ufs_crypto_engine_cfg(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp,
struct scatterlist *sg, int index,
- int sector_offset)
+ int sector_offset, int page_index)
{
- return exynos_ufs_fmp_cfg(hba, lrbp, sg, index, sector_offset);
+ return exynos_ufs_fmp_cfg(hba, lrbp, sg, index, sector_offset, page_index);
}
static int exynos_ufs_crypto_engine_clear(struct ufs_hba *hba,
int sg_segments;
int i, ret;
int sector_offset = 0;
+ int page_index = 0;
cmd = lrbp->cmd;
sg_segments = scsi_dma_map(cmd);
prd_table[i].reserved = 0;
hba->transferred_sector += prd_table[i].size;
- ret = ufshcd_vops_crypto_engine_cfg(hba, lrbp, sg, i, sector_offset);
+ ret = ufshcd_vops_crypto_engine_cfg(hba, lrbp, sg, i, sector_offset, page_index++);
if (ret) {
dev_err(hba->dev,
"%s: failed to configure crypto engine (%d)\n",
u8 (*get_unipro_result)(struct ufs_hba *hba, u32 num);
int (*phy_initialization)(struct ufs_hba *);
int (*crypto_engine_cfg)(struct ufs_hba *, struct ufshcd_lrb *,
- struct scatterlist *, int, int);
+ struct scatterlist *, int, int, int);
int (*crypto_engine_clear)(struct ufs_hba *, struct ufshcd_lrb *);
int (*access_control_abort)(struct ufs_hba *);
static inline int ufshcd_vops_crypto_engine_cfg(struct ufs_hba *hba,
struct ufshcd_lrb *lrbp,
struct scatterlist *sg, int index,
- int sector_offset)
+ int sector_offset, int page_index)
{
if (hba->vops && hba->vops->crypto_engine_cfg)
return hba->vops->crypto_engine_cfg(hba, lrbp, sg, index,
- sector_offset);
+ sector_offset, page_index);
return 0;
}
err = -EIO;
goto errout;
}
- fscrypt_set_bio(inode, bio);
+ fscrypt_set_bio(inode, bio, 0);
crypto_diskcipher_debug(FS_ZEROPAGE, bio->bi_opf);
err = submit_bio_wait(bio);
if (err == 0 && bio->bi_status)
return __fscrypt_disk_encrypted(inode);
}
-void fscrypt_set_bio(const struct inode *inode, struct bio *bio)
+void fscrypt_set_bio(const struct inode *inode, struct bio *bio, u64 dun)
{
#ifdef CONFIG_CRYPTO_DISKCIPHER
if (__fscrypt_disk_encrypted(inode))
- crypto_diskcipher_set(bio, inode->i_crypt_info->ci_dtfm);
+ crypto_diskcipher_set(bio, inode->i_crypt_info->ci_dtfm, inode, dun);
#endif
return;
}
*
* bios hold a dio reference between submit_bio and ->end_io.
*/
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+/*
+ * True when @inode lives on a filesystem whose registered type name equals
+ * @fs_type (e.g. "f2fs").  Tolerates NULL at every step of the chain.
+ */
+static bool is_inode_filesystem_type(const struct inode *inode,
+				     const char *fs_type)
+{
+	if (!inode || !fs_type || !inode->i_sb || !inode->i_sb->s_type)
+		return false;
+
+	return strcmp(inode->i_sb->s_type->name, fs_type) == 0;
+}
+#endif
+
static inline void dio_bio_submit(struct dio *dio, struct dio_submit *sdio)
{
struct bio *bio = sdio->bio;
#if defined(CONFIG_CRYPTO_DISKCIPHER)
if (dio->inode && fscrypt_has_encryption_key(dio->inode)) {
- fscrypt_set_bio(dio->inode, bio);
+ fscrypt_set_bio(dio->inode, bio, 0);
crypto_diskcipher_debug(FS_DIO, bio->bi_opf);
+#if defined(CONFIG_CRYPTO_DISKCIPHER_DUN)
+	/*
+	 * Device unit number for the IV: inode number in the high 32 bits,
+	 * file page index in the low 32 bits.  Cast i_ino to u64 before the
+	 * shift: i_ino is unsigned long, which is only 32 bits on 32-bit
+	 * kernels, so "<< 32" on it would be undefined behaviour there.
+	 */
+	#define PG_DUN(i, p) \
+		((((u64)(i)->i_ino & 0xffffffff) << 32) | ((p) & 0xffffffff))
+
+	if (is_inode_filesystem_type(dio->inode, "f2fs"))
+		fscrypt_set_bio(dio->inode, bio, PG_DUN(dio->inode,
+			(sdio->logical_offset_in_bio >> PAGE_SHIFT)));
+#endif
}
#endif
+
if (dio->is_async && dio->op == REQ_OP_READ && dio->should_dirty)
bio_set_pages_dirty(bio);
bio_set_op_attrs(io->io_bio, REQ_OP_WRITE, io_op_flags);
if (ext4_encrypted_inode(io->io_end->inode) &&
S_ISREG(io->io_end->inode->i_mode)) {
- fscrypt_set_bio(io->io_end->inode, io->io_bio);
+ fscrypt_set_bio(io->io_end->inode, io->io_bio, 0);
crypto_diskcipher_debug(FS_PAGEIO, io->io_bio->bi_opf);
}
submit_bio(io->io_bio);
bio_set_op_attrs(bio, REQ_OP_READ,
ctx ? REQ_NOENCRYPT : 0);
if (ext4_encrypted_inode(inode) && S_ISREG(inode->i_mode)) {
- fscrypt_set_bio(inode, bio);
+ fscrypt_set_bio(inode, bio, 0);
crypto_diskcipher_debug(FS_READP, bio->bi_opf);
}
}
unsigned int enabled_steps;
};
+/*
+ * Device unit number for the IV: inode number in the high 32 bits, page
+ * index in the low 32 bits.  Cast i_ino to u64 before shifting: i_ino is
+ * unsigned long, which is 32 bits on 32-bit kernels, so "<< 32" on it
+ * would be undefined behaviour there.
+ */
+#define PG_DUN(i, p) \
+	((((u64)(i)->i_ino & 0xffffffff) << 32) | ((p)->index & 0xffffffff))
+
 static inline bool f2fs_may_encrypt_bio(struct inode *inode,
 					struct f2fs_io_info *fio)
 {
+#ifdef CONFIG_CRYPTO_DISKCIPHER
+	/* Only plain DATA pages take the inline-crypto path; pages already
+	 * software-encrypted (fio->encrypted_page) must not be encrypted a
+	 * second time by the disk engine.
+	 */
 	if (fio && (fio->type != DATA || fio->encrypted_page))
 		return false;
 	return (f2fs_encrypted_file(inode) &&
 		fscrypt_disk_encrypted(inode));
+#else
+	/* No diskcipher support built in: never use the inline-crypto path. */
+	return false;
+#endif
 }
static inline bool f2fs_bio_disk_encrypted(unsigned int bi_opf)
return false;
}
-static bool f2fs_mergeable_bio(struct bio *bio, void *ci, bool bio_encrypted)
+static bool f2fs_mergeable_bio(struct bio *bio, u64 dun, void *ci, bool bio_encrypted)
 {
+#ifdef CONFIG_CRYPTO_DISKCIPHER
+	/* No bio in flight yet: nothing to conflict with. */
 	if (!bio)
 		return true;
+	/* Neither the bio nor the new page is disk-encrypted: mergeable. */
 	if (!f2fs_bio_disk_encrypted(bio->bi_opf) && !bio_encrypted)
 		return true;
-	/* ICE allows only consecutive iv_key stream. */
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+	/* Same diskcipher tfm: ICE allows only a consecutive IV stream, so
+	 * the new page's dun must directly follow the bio's last dun.
+	 */
+	if (bio->bi_aux_private == ci)
+		return bio_end_dun(bio) == dun;
+	else
+		return false;
+#else
+	/* Without DUNs, sharing the same diskcipher tfm is sufficient. */
 	return bio->bi_aux_private == ci;
+#endif
+#else
+	/* No diskcipher support: encryption never blocks a merge. */
+	return true;
+#endif
 }
static void __read_end_io(struct bio *bio)
bio_set_op_attrs(bio, fio->op, fio->op_flags);
if (f2fs_may_encrypt_bio(inode, fio))
- fscrypt_set_bio(inode, bio);
+ fscrypt_set_bio(inode, bio, PG_DUN(inode, fio->page));
inc_page_count(fio->sbi, is_read_io(fio->op) ?
__read_io_type(page): WB_DATA_TYPE(fio->page));
struct page *bio_page;
struct inode *inode;
bool bio_encrypted;
+ u64 dun;
f2fs_bug_on(sbi, is_read_io(fio->op));
bio_page = fio->encrypted_page ? fio->encrypted_page : fio->page;
inode = fio->page->mapping->host;
+ dun = PG_DUN(inode, fio->page);
bio_encrypted = f2fs_may_encrypt_bio(inode, fio);
fio->op_flags |= fio->encrypted_page ? REQ_NOENCRYPT : 0;
!__same_bdev(sbi, fio->new_blkaddr, io->bio)))
__submit_merged_bio(io);
- if (!f2fs_mergeable_bio(io->bio, fscrypt_get_diskcipher(inode), bio_encrypted))
+ if (!f2fs_mergeable_bio(io->bio, dun, fscrypt_get_diskcipher(inode), bio_encrypted))
__submit_merged_bio(io);
alloc_new:
BIO_MAX_PAGES, false,
fio->type, fio->temp);
if (bio_encrypted)
- fscrypt_set_bio(inode, io->bio);
+ fscrypt_set_bio(inode, io->bio, dun);
io->fio = *fio;
}
}
if (f2fs_may_encrypt_bio(inode, NULL))
- fscrypt_set_bio(inode, bio);
+ fscrypt_set_bio(inode, bio, PG_DUN(inode, page));
ClearPageError(page);
inc_page_count(F2FS_I_SB(inode), F2FS_RD_DATA);
sector_t block_nr;
struct f2fs_map_blocks map;
bool bio_encrypted;
+ u64 dun;
map.m_pblk = 0;
map.m_lblk = 0;
bio = NULL;
}
+ dun = PG_DUN(inode, page);
bio_encrypted = f2fs_may_encrypt_bio(inode, NULL);
- if (!f2fs_mergeable_bio(bio, fscrypt_get_diskcipher(inode), bio_encrypted)) {
+ if (!f2fs_mergeable_bio(bio, dun, fscrypt_get_diskcipher(inode), bio_encrypted)) {
__submit_bio(F2FS_I_SB(inode), bio, DATA);
bio = NULL;
}
goto set_error_page;
}
if (f2fs_may_encrypt_bio(inode, NULL))
- fscrypt_set_bio(inode, bio);
+ fscrypt_set_bio(inode, bio, dun);
}
/*
struct crypto_diskcipher {
u32 algo;
unsigned int ivsize;
+ struct inode *inode;
#ifdef USE_FREE_REQ
/* for crypto_free_req_diskcipher */
unsigned long req_jiffies;
* @bio: bio structure
*/
struct crypto_diskcipher *crypto_diskcipher_get(struct bio *bio);
+struct inode *crypto_diskcipher_get_inode(struct bio *bio);
/**
* crypto_diskcipher_set() - set diskcipher to bio
* This functions set thm to bio->bi_aux_private to pass it to host driver.
*
*/
-void crypto_diskcipher_set(struct bio *bio, struct crypto_diskcipher *tfm);
+void crypto_diskcipher_set(struct bio *bio, struct crypto_diskcipher *tfm, const struct inode *inode, u64 dun);
/**
* crypto_diskcipher_setkey() - set key for cipher
DISKC_API_ALLOC, DISKC_API_FREE, DISKC_API_FREEREQ, DISKC_API_SETKEY,
DISKC_API_SET, DISKC_API_GET, DISKC_API_CRYPT, DISKC_API_CLEAR,
DISKC_API_MAX, FS_PAGEIO, FS_READP, FS_DIO, FS_BLOCK_WRITE,
- FS_ZEROPAGE, BLK_BH, DMCRYPT, DISKC_MERGE, DISKC_CHECK_ERR,
+ FS_ZEROPAGE, BLK_BH, DMCRYPT, DISKC_MERGE, DISKC_MERGE_ERR_INODE, DISKC_MERGE_ERR_DISKC,
FS_DEC_WARN, FS_ENC_WARN, DISKC_MERGE_DIO, DISKC_FREE_REQ_WARN,
- DISKC_FREE_WQ_WARN, DISKC_CRYPT_WARN, DISKC_USER_MAX
+ DISKC_FREE_WQ_WARN, DISKC_CRYPT_WARN,
+ DM_CRYPT_NONENCRYPT, DM_CRYPT_CTR, DM_CRYPT_DTR, DM_CRYPT_OVER,
+ F2FS_gc, F2FS_gc_data_page, F2FS_gc_data_page_key, F2FS_gc_data_page_key_FC,
+ F2FS_gc_data_page_FC, F2FS_gc_data_block, F2FS_gc_data_block_key,
+ F2FS_gc_data_block_err1, F2FS_gc_data_block_err2, F2FS_gc_data_block_err3, F2FS_gc_skip,
+ DISKC_ERR, DISKC_NO_KEY_ERR, DISKC_NO_SYNC_ERR, DISKC_NO_CRYPT_ERR, DISKC_NO_DISKC_ERR,
+ DISKC_USER_MAX
};
+
#ifdef CONFIG_CRYPTO_DISKCIPHER_DEBUG
void crypto_diskcipher_debug(enum diskcipher_dbg dbg, int idx);
-void crypto_diskcipher_check(struct bio *bio);
#else
-#define crypto_diskcipher_check(a) ((void)0)
#define crypto_diskcipher_debug(a, b) ((void)0)
#endif
#else
#define crypto_free_diskcipher(a) ((void)0)
#define crypto_free_req_diskcipher(a) ((void)0)
#define crypto_diskcipher_get(a) ((void *)NULL)
-#define crypto_diskcipher_set(a, b) ((void)0)
+#define crypto_diskcipher_get_inode(a) ((void *)NULL)
+#define crypto_diskcipher_set(a, b, c, d) ((void)0)
#define crypto_diskcipher_clearkey(a) ((void)0)
#define crypto_diskcipher_setkey(a, b, c, d) (-1)
-#define crypto_diskcipher_check(a) ((void)0)
#define crypto_diskcipher_debug(a, b) ((void)0)
#endif
#endif /* _DISKCIPHER_H_ */
#define bio_sectors(bio) ((bio)->bi_iter.bi_size >> 9)
#define bio_end_sector(bio) ((bio)->bi_iter.bi_sector + bio_sectors((bio)))
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+/* DUN (device unit number) helpers for inline-crypto IV streams.
+ * bio_duns() converts the bio's size from 512-byte sectors to 4KB crypto
+ * units; bio_end_dun() is the first DUN past the end of this bio, used to
+ * test whether a following bio continues the same IV stream.
+ */
+#define bio_dun(bio)		((bio)->bi_iter.bi_dun)
+#define bio_duns(bio)		(bio_sectors(bio) >> 3) /* 4KB unit */
+#define bio_end_dun(bio)	(bio_dun(bio) + bio_duns(bio))
+#endif
+
/*
* Return the data direction, READ or WRITE.
*/
{
iter->bi_sector += bytes >> 9;
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ if (iter->bi_dun)
+ iter->bi_dun += bytes >> 12;
+#endif
+
if (bio_no_advance_iter(bio)) {
iter->bi_size -= bytes;
iter->bi_done += bytes;
unsigned int __data_len; /* total data len */
int tag;
sector_t __sector; /* sector cursor */
-
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ u64 __dun; /* dun for UFS */
+#endif
struct bio *bio;
struct bio *biotail;
return q->nr_requests;
}
-static inline bool blk_crypt_mergeable(struct bio *a, struct bio *b)
-{
- if (bio_has_crypt(a) == bio_has_crypt(b))
- return true;
-
- return false;
-}
-
/*
* q->prep_rq_fn return values
*/
return rq->__sector;
}
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+/*
+ * DUN (device unit number) cursor of the request for inline-crypto IVs.
+ * __dun is u64, so return u64 rather than sector_t: sector_t may be only
+ * 32 bits on 32-bit kernels, which would truncate the high (inode) half.
+ */
+static inline u64 blk_rq_dun(const struct request *rq)
+{
+	return rq->__dun;
+}
+#endif
+
static inline unsigned int blk_rq_bytes(const struct request *rq)
{
return rq->__data_len;
unsigned int bi_bvec_done; /* number of bytes completed in
current bvec */
+#ifdef CONFIG_CRYPTO_DISKCIPHER_DUN
+ u64 bi_dun;
+#endif
};
/*
return 0;
}
-static inline void fscrypt_set_bio(const struct inode *inode, struct bio *bio)
+static inline void fscrypt_set_bio(const struct inode *inode, struct bio *bio, u64 dun)
{
return;
}
extern void fscrypt_pullback_bio_page(struct page **, bool);
extern int fscrypt_zeroout_range(const struct inode *, pgoff_t, sector_t,
unsigned int);
-void fscrypt_set_bio(const struct inode *inode, struct bio *bio);
+void fscrypt_set_bio(const struct inode *inode, struct bio *bio, u64 dun);
void *fscrypt_get_diskcipher(const struct inode *inode);
int fscrypt_disk_encrypted(const struct inode *inode);