if (bio_check_eod(bio, nr_sectors))
goto end_io;
- if ((bio->bi_rw & REQ_DISCARD) && !blk_queue_discard(q)) {
+ if ((bio->bi_rw & REQ_DISCARD) &&
+ (!blk_queue_discard(q) ||
+ ((bio->bi_rw & REQ_SECURE) &&
+ !blk_queue_secdiscard(q)))) {
err = -EOPNOTSUPP;
goto end_io;
}
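
The widened test above rejects a discard bio in two cases: the queue has no discard support at all, or the bio also carries REQ_SECURE and the queue cannot honour a secure discard. A minimal restatement of the predicate, as a sketch only (the helper name is hypothetical, not part of the patch):

static bool discard_unsupported(struct bio *bio, struct request_queue *q)
{
	if (!(bio->bi_rw & REQ_DISCARD))
		return false;	/* not a discard, nothing to check */
	if (!blk_queue_discard(q))
		return true;	/* no discard support at all */
	/* secure discard requested on a queue that cannot honour it */
	return (bio->bi_rw & REQ_SECURE) && !blk_queue_secdiscard(q);
}
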
max_discard_sectors &= ~(disc_sects - 1);
}
+ if (flags & BLKDEV_IFL_SECURE) {
+ if (!blk_queue_secdiscard(q))
+ return -EOPNOTSUPP;
+ type |= DISCARD_SECURE;
+ }
+
while (nr_sects && !ret) {
bio = bio_alloc(gfp_mask, 1);
if (!bio) {
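
With BLKDEV_IFL_SECURE handled here, an in-kernel caller requests a secure discard by OR-ing the flag into the flags argument of blkdev_issue_discard(), exactly as blk_ioctl_discard() does later in this patch. A minimal sketch (the wrapper function is hypothetical):

static int erase_range_securely(struct block_device *bdev,
				sector_t sector, sector_t nr_sects)
{
	/* blkdev_issue_discard() itself returns -EOPNOTSUPP if the
	 * queue lacks secure discard support. */
	return blkdev_issue_discard(bdev, sector, nr_sects, GFP_KERNEL,
				    BLKDEV_IFL_WAIT | BLKDEV_IFL_SECURE);
}
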
case BLKFLSBUF:
case BLKROSET:
case BLKDISCARD:
+ case BLKSECDISCARD:
/*
* the ones below are implemented in blkdev_locked_ioctl,
* but we call blkdev_ioctl, which gets the lock for us
*/
if ((bio->bi_rw & REQ_DISCARD) != (rq->bio->bi_rw & REQ_DISCARD))
return 0;
+ /*
+ * Don't merge a plain discard request with a secure discard request
+ */
+ if ((bio->bi_rw & REQ_SECURE) != (rq->bio->bi_rw & REQ_SECURE))
+ return 0;
+
/*
* different data direction or already started, don't merge
*/
}
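
The inequality refuses a merge whenever exactly one side carries REQ_SECURE: folding a plain discard into a secure one would extend the (typically slower) secure erase to blocks that never asked for it, while the reverse would silently drop the guarantee. A standalone illustration of the test, using an illustrative flag value rather than the kernel's:

#include <stdbool.h>

#define DEMO_REQ_SECURE (1u << 0)	/* illustrative value only */

/* Mirrors the merge check: any REQ_SECURE mismatch means "do not merge". */
static bool secure_mismatch(unsigned int bio_rw, unsigned int rq_rw)
{
	return (bio_rw & DEMO_REQ_SECURE) != (rq_rw & DEMO_REQ_SECURE);
}
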
static int blk_ioctl_discard(struct block_device *bdev, uint64_t start,
- uint64_t len)
+ uint64_t len, int secure)
{
+ unsigned long flags = BLKDEV_IFL_WAIT;
+
if (start & 511)
return -EINVAL;
if (len & 511)
return -EINVAL;
if (start + len > (bdev->bd_inode->i_size >> 9))
return -EINVAL;
- return blkdev_issue_discard(bdev, start, len, GFP_KERNEL,
- BLKDEV_IFL_WAIT);
+ if (secure)
+ flags |= BLKDEV_IFL_SECURE;
+ return blkdev_issue_discard(bdev, start, len, GFP_KERNEL, flags);
}
static int put_ushort(unsigned long arg, unsigned short val)
set_device_ro(bdev, n);
return 0;
- case BLKDISCARD: {
+ case BLKDISCARD:
+ case BLKSECDISCARD: {
uint64_t range[2];
if (!(mode & FMODE_WRITE))
return -EBADF;
if (copy_from_user(range, (void __user *)arg, sizeof(range)))
return -EFAULT;
- return blk_ioctl_discard(bdev, range[0], range[1]);
+ return blk_ioctl_discard(bdev, range[0], range[1],
+ cmd == BLKSECDISCARD);
}
case HDIO_GETGEO: {
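
From userspace the new command is driven exactly like BLKDISCARD: pass a two-element u64 array holding byte offset and byte length, both multiples of 512 per the checks in blk_ioctl_discard(), on a descriptor opened for writing. A minimal sketch (the device path is only an example):

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <sys/ioctl.h>
#include <unistd.h>
#include <linux/fs.h>	/* BLKSECDISCARD, once this patch is applied */

int main(void)
{
	uint64_t range[2] = { 0, 1024 * 1024 };	/* offset, length: 512-aligned */
	int fd = open("/dev/mmcblk0", O_WRONLY);	/* example device */

	if (fd < 0)
		return 1;
	if (ioctl(fd, BLKSECDISCARD, &range) < 0)
		perror("BLKSECDISCARD");	/* EOPNOTSUPP if unsupported */
	close(fd);
	return 0;
}
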
__REQ_FLUSH, /* request for cache flush */
__REQ_IO_STAT, /* account I/O stat */
__REQ_MIXED_MERGE, /* merge of different types, fail separately */
+ __REQ_SECURE, /* secure discard (used with __REQ_DISCARD) */
__REQ_NR_BITS, /* stops here */
};
#define REQ_FLUSH (1 << __REQ_FLUSH)
#define REQ_IO_STAT (1 << __REQ_IO_STAT)
#define REQ_MIXED_MERGE (1 << __REQ_MIXED_MERGE)
+#define REQ_SECURE (1 << __REQ_SECURE)
#endif /* __LINUX_BLK_TYPES_H */
#define QUEUE_FLAG_DISCARD 16 /* supports DISCARD */
#define QUEUE_FLAG_NOXMERGES 17 /* No extended merges */
#define QUEUE_FLAG_ADD_RANDOM 18 /* Contributes to random pool */
+#define QUEUE_FLAG_SECDISCARD 19 /* supports SECDISCARD */
#define QUEUE_FLAG_DEFAULT ((1 << QUEUE_FLAG_IO_STAT) | \
(1 << QUEUE_FLAG_CLUSTER) | \
#define blk_queue_stackable(q) \
test_bit(QUEUE_FLAG_STACKABLE, &(q)->queue_flags)
#define blk_queue_discard(q) test_bit(QUEUE_FLAG_DISCARD, &(q)->queue_flags)
+#define blk_queue_secdiscard(q) (blk_queue_discard(q) && \
+ test_bit(QUEUE_FLAG_SECDISCARD, &(q)->queue_flags))
#define blk_noretry_request(rq) \
((rq)->cmd_flags & (REQ_FAILFAST_DEV|REQ_FAILFAST_TRANSPORT| \
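
Note that blk_queue_secdiscard() requires both flags, so a driver advertising secure discard must advertise plain discard as well. A hedged sketch of the driver-side setup (the function is hypothetical; queue_flag_set_unlocked() is the usual helper while the queue is still being initialised):

static void demo_init_discard(struct request_queue *q, bool hw_secure_erase)
{
	queue_flag_set_unlocked(QUEUE_FLAG_DISCARD, q);
	if (hw_secure_erase)
		queue_flag_set_unlocked(QUEUE_FLAG_SECDISCARD, q);
}
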
}
enum {
BLKDEV_WAIT, /* wait for completion */
- BLKDEV_BARRIER, /*issue request with barrier */
+ BLKDEV_BARRIER, /* issue request with barrier */
+ BLKDEV_SECURE, /* secure discard */
};
#define BLKDEV_IFL_WAIT (1 << BLKDEV_WAIT)
#define BLKDEV_IFL_BARRIER (1 << BLKDEV_BARRIER)
+#define BLKDEV_IFL_SECURE (1 << BLKDEV_SECURE)
extern int blkdev_issue_flush(struct block_device *, gfp_t, sector_t *,
unsigned long);
extern int blkdev_issue_discard(struct block_device *bdev, sector_t sector,
*/
#define DISCARD_NOBARRIER (WRITE | REQ_DISCARD)
#define DISCARD_BARRIER (WRITE | REQ_DISCARD | REQ_HARDBARRIER)
+#define DISCARD_SECURE (DISCARD_NOBARRIER | REQ_SECURE)
#define SEL_IN 1
#define SEL_OUT 2
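
DISCARD_SECURE extends the unbarriered discard type with REQ_SECURE. blkdev_issue_discard() folds it into the type it later hands to submit_bio(), which is how REQ_SECURE reaches the bio that generic_make_request() checks above. A hedged reconstruction of that selection (the helper name is hypothetical; the real logic lives inline in blkdev_issue_discard()):

static int pick_discard_type(struct request_queue *q, unsigned long flags)
{
	int type = (flags & BLKDEV_IFL_BARRIER) ? DISCARD_BARRIER
						: DISCARD_NOBARRIER;

	if (flags & BLKDEV_IFL_SECURE) {
		if (!blk_queue_secdiscard(q))
			return -EOPNOTSUPP;
		type |= DISCARD_SECURE;	/* folds in REQ_SECURE */
	}
	return type;	/* ultimately used as submit_bio(type, bio) */
}
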
#define BLKALIGNOFF _IO(0x12,122)
#define BLKPBSZGET _IO(0x12,123)
#define BLKDISCARDZEROES _IO(0x12,124)
+#define BLKSECDISCARD _IO(0x12,125)
#define BMAP_IOCTL 1 /* obsolete - kept for compatibility */
#define FIBMAP _IO(0x00,1) /* bmap access */
if (rq->cmd_flags & REQ_DISCARD)
rw |= REQ_DISCARD;
+ if (rq->cmd_flags & REQ_SECURE)
+ rw |= REQ_SECURE;
+
if (rq->cmd_type == REQ_TYPE_BLOCK_PC) {
what |= BLK_TC_ACT(BLK_TC_PC);
__blk_add_trace(bt, 0, blk_rq_bytes(rq), rw,
if (rw & REQ_SYNC)
rwbs[i++] = 'S';
if (rw & REQ_META)
rwbs[i++] = 'M';
+ if (rw & REQ_SECURE)
+ rwbs[i++] = 'E';
rwbs[i] = '\0';
}
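
With the 'E' marker in place, the rwbs field of blktrace output identifies secure requests. A standalone userspace demo of the same decoding step (flag values are illustrative placeholders, not the kernel's):

#include <stdio.h>

#define DEMO_REQ_SYNC	(1u << 0)	/* illustrative values only */
#define DEMO_REQ_META	(1u << 1)
#define DEMO_REQ_SECURE	(1u << 2)

static void fill_rwbs_tail(char *rwbs, unsigned int rw)
{
	int i = 0;

	if (rw & DEMO_REQ_SYNC)
		rwbs[i++] = 'S';
	if (rw & DEMO_REQ_META)
		rwbs[i++] = 'M';
	if (rw & DEMO_REQ_SECURE)
		rwbs[i++] = 'E';	/* new: secure request marker */
	rwbs[i] = '\0';
}

int main(void)
{
	char rwbs[8];

	fill_rwbs_tail(rwbs, DEMO_REQ_SYNC | DEMO_REQ_SECURE);
	printf("%s\n", rwbs);	/* prints "SE" */
	return 0;
}
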
if (rq->cmd_flags & REQ_DISCARD)
rw |= REQ_DISCARD;
+ if (rq->cmd_flags & REQ_SECURE)
+ rw |= REQ_SECURE;
+
bytes = blk_rq_bytes(rq);
blk_fill_rwbs(rwbs, rw, bytes);