void *digest;
};
-struct drbd_epoch_entry {
+struct drbd_peer_request {
struct drbd_work w;
struct drbd_epoch *epoch; /* for writes */
struct drbd_conf *mdev;
extern int drbd_send_sync_param(struct drbd_conf *mdev, struct syncer_conf *sc);
extern int drbd_send_b_ack(struct drbd_conf *mdev, u32 barrier_nr,
u32 set_size);
-extern int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
- struct drbd_epoch_entry *e);
+extern int drbd_send_ack(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
extern int drbd_send_ack_rp(struct drbd_conf *mdev, enum drbd_packet cmd,
struct p_block_req *rp);
extern int drbd_send_ack_dp(struct drbd_conf *mdev, enum drbd_packet cmd,
extern int drbd_send_ack_ex(struct drbd_conf *mdev, enum drbd_packet cmd,
sector_t sector, int blksize, u64 block_id);
extern int drbd_send_oos(struct drbd_conf *mdev, struct drbd_request *req);
-extern int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
- struct drbd_epoch_entry *e);
+extern int drbd_send_block(struct drbd_conf *, enum drbd_packet,
+ struct drbd_peer_request *);
extern int drbd_send_dblock(struct drbd_conf *mdev, struct drbd_request *req);
extern int drbd_send_drequest(struct drbd_conf *mdev, int cmd,
sector_t sector, int size, u64 block_id);
extern void drbd_csum_bio(struct drbd_conf *, struct crypto_hash *, struct bio *, void *);
-extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *, struct drbd_epoch_entry *, void *);
+extern void drbd_csum_ee(struct drbd_conf *, struct crypto_hash *,
+ struct drbd_peer_request *, void *);
/* worker callbacks */
extern int w_req_cancel_conflict(struct drbd_conf *, struct drbd_work *, int);
extern int w_read_retry_remote(struct drbd_conf *, struct drbd_work *, int);
/* drbd_receiver.c */
extern int drbd_rs_should_slow_down(struct drbd_conf *mdev, sector_t sector);
-extern int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type);
+extern int drbd_submit_ee(struct drbd_conf *, struct drbd_peer_request *,
+ const unsigned, const int);
extern int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list);
-extern struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local);
-extern void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- int is_net);
+extern struct drbd_peer_request *drbd_alloc_ee(struct drbd_conf *,
+ u64, sector_t, unsigned int,
+ gfp_t) __must_hold(local);
+extern void drbd_free_some_ee(struct drbd_conf *, struct drbd_peer_request *,
+ int);
#define drbd_free_ee(m,e) drbd_free_some_ee(m, e, 0)
#define drbd_free_net_ee(m,e) drbd_free_some_ee(m, e, 1)
extern void drbd_wait_ee_list_empty(struct drbd_conf *mdev,
int __init drbd_nl_init(void);
void drbd_bcast_state(struct drbd_conf *mdev, union drbd_state);
void drbd_bcast_sync_progress(struct drbd_conf *mdev);
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e);
+void drbd_bcast_ee(struct drbd_conf *, const char *, const int, const char *,
+ const char *, const struct drbd_peer_request *);
/**
return 0;
}
-static inline int drbd_ee_has_active_page(struct drbd_epoch_entry *e)
+static inline int drbd_ee_has_active_page(struct drbd_peer_request *e)
{
struct page *page = e->pages;
page_chain_for_each(page) {
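
The page_chain_for_each() walk above works because a peer request's data pages are not kept in an array: DRBD links them into a single chain through each page's private field (the page_chain_* helpers in drbd_int.h). A rough userspace sketch of the same intrusive-chain idiom, with a hypothetical struct node standing in for struct page and chain_next() for page_chain_next():

#include <stdio.h>
#include <stdint.h>

/* Hypothetical stand-in for struct page: the chain link lives in
 * the object itself, as DRBD reuses page->private for the pointer. */
struct node {
	uintptr_t private;	/* next node in the chain, or 0 */
	int refcount;
};

#define chain_next(n)		((struct node *)(n)->private)
#define chain_for_each(n)	for (; (n); (n) = chain_next(n))

/* Analogue of drbd_ee_has_active_page(): true while any node in the
 * chain is still referenced elsewhere (e.g. by a pending sendpage). */
static int chain_has_active_node(struct node *n)
{
	chain_for_each(n) {
		if (n->refcount > 1)
			return 1;
	}
	return 0;
}

int main(void)
{
	struct node c = { 0, 1 };
	struct node b = { (uintptr_t)&c, 2 };	/* still busy */
	struct node a = { (uintptr_t)&b, 1 };

	printf("active: %d\n", chain_has_active_node(&a));	/* 1 */
	return 0;
}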
* @e: Epoch entry.
*/
int drbd_send_ack(struct drbd_conf *mdev, enum drbd_packet cmd,
- struct drbd_epoch_entry *e)
+ struct drbd_peer_request *e)
{
return _drbd_send_ack(mdev, cmd,
cpu_to_be64(e->i.sector),
return 1;
}
-static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static int _drbd_send_zc_ee(struct drbd_conf *mdev, struct drbd_peer_request *e)
{
struct page *page = e->pages;
unsigned len = e->i.size;
* C_SYNC_SOURCE -> C_SYNC_TARGET (P_RS_DATA_REPLY)
*/
int drbd_send_block(struct drbd_conf *mdev, enum drbd_packet cmd,
- struct drbd_epoch_entry *e)
+ struct drbd_peer_request *e)
{
int ok;
struct p_data p;
goto Enomem;
drbd_ee_cache = kmem_cache_create(
- "drbd_ee", sizeof(struct drbd_epoch_entry), 0, 0, NULL);
+ "drbd_ee", sizeof(struct drbd_peer_request), 0, 0, NULL);
if (drbd_ee_cache == NULL)
goto Enomem;
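
One detail worth noting in the hunk above: only the sizeof expression changes; the slab cache keeps its old name "drbd_ee". The cache name is user-visible (e.g. in /proc/slabinfo), so leaving it untouched is presumably deliberate. A minimal module sketch of the same create/alloc/destroy pairing, with a hypothetical demo_req in place of the DRBD types:

#include <linux/module.h>
#include <linux/slab.h>

struct demo_req { u64 id; };

static struct kmem_cache *demo_cache;

static int __init demo_init(void)
{
	struct demo_req *r;

	/* name, object size, align, flags, ctor */
	demo_cache = kmem_cache_create("demo_req", sizeof(struct demo_req),
				       0, 0, NULL);
	if (!demo_cache)
		return -ENOMEM;

	r = kmem_cache_alloc(demo_cache, GFP_KERNEL);
	if (r)
		kmem_cache_free(demo_cache, r);
	return 0;
}

static void __exit demo_exit(void)
{
	kmem_cache_destroy(demo_cache);
}

module_init(demo_init);
module_exit(demo_exit);
MODULE_LICENSE("GPL");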
cn_netlink_send(cn_reply, CN_IDX_DRBD, GFP_NOIO);
}
-void drbd_bcast_ee(struct drbd_conf *mdev,
- const char *reason, const int dgs,
- const char* seen_hash, const char* calc_hash,
- const struct drbd_epoch_entry* e)
+void drbd_bcast_ee(struct drbd_conf *mdev, const char *reason, const int dgs,
+ const char *seen_hash, const char *calc_hash,
+ const struct drbd_peer_request *e)
{
struct cn_msg *cn_reply;
struct drbd_nl_cfg_reply *reply;
static void reclaim_net_ee(struct drbd_conf *mdev, struct list_head *to_be_freed)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
struct list_head *le, *tle;
/* The EEs are always appended to the end of the list. Since
   they are sent in order over the wire, they have to finish
   in order. As soon as we see the first not finished we can
   stop to examine the list... */
list_for_each_safe(le, tle, &mdev->net_ee) {
- e = list_entry(le, struct drbd_epoch_entry, w.list);
+ e = list_entry(le, struct drbd_peer_request, w.list);
if (drbd_ee_has_active_page(e))
break;
list_move(le, to_be_freed);
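
The early break above relies on the ordering invariant stated in the comment: net_ee entries are appended and complete in order, so the finished entries always form a prefix of the list. A self-contained sketch of the same move-the-finished-prefix pattern, with a hypothetical singly linked entry type in place of the kernel's list_head machinery:

#include <stdio.h>

struct entry {
	struct entry *next;
	int busy;		/* analogue of drbd_ee_has_active_page() */
	int id;
};

/* Move the leading run of finished entries from *head to *freed.
 * Entries finish in submission order, so the scan stops at the
 * first busy entry (cf. reclaim_net_ee()). */
static void reclaim_prefix(struct entry **head, struct entry **freed)
{
	while (*head && !(*head)->busy) {
		struct entry *e = *head;

		*head = e->next;
		e->next = *freed;
		*freed = e;
	}
}

int main(void)
{
	struct entry c = { NULL, 0, 3 };
	struct entry b = { &c, 1, 2 };	/* still busy: stops the scan */
	struct entry a = { &b, 0, 1 };
	struct entry *head = &a, *freed = NULL;

	reclaim_prefix(&head, &freed);
	printf("first remaining: %d, freed: %d\n",
	       head ? head->id : -1, freed ? freed->id : -1);	/* 2, 1 */
	return 0;
}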
static void drbd_kick_lo_and_reclaim_net(struct drbd_conf *mdev)
{
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *e, *t;
spin_lock_irq(&mdev->tconn->req_lock);
reclaim_net_ee(mdev, &reclaimed);
drbd_wait_ee_list_empty()
*/
-struct drbd_epoch_entry *drbd_alloc_ee(struct drbd_conf *mdev,
- u64 id,
- sector_t sector,
- unsigned int data_size,
- gfp_t gfp_mask) __must_hold(local)
+struct drbd_peer_request *
+drbd_alloc_ee(struct drbd_conf *mdev, u64 id, sector_t sector,
+ unsigned int data_size, gfp_t gfp_mask) __must_hold(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
struct page *page;
unsigned nr_pages = (data_size + PAGE_SIZE -1) >> PAGE_SHIFT;
return NULL;
}
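
The nr_pages line in drbd_alloc_ee() is the usual round-up division written with shifts: (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT equals ceil(data_size / PAGE_SIZE). A quick check, assuming 4 KiB pages (PAGE_SHIFT == 12; the actual value is architecture-dependent):

#include <stdio.h>

#define PAGE_SHIFT	12
#define PAGE_SIZE	(1UL << PAGE_SHIFT)

static unsigned nr_pages_for(unsigned long data_size)
{
	/* round up to the next page boundary, then convert to pages */
	return (data_size + PAGE_SIZE - 1) >> PAGE_SHIFT;
}

int main(void)
{
	printf("%u %u %u\n", nr_pages_for(4096), nr_pages_for(4097),
	       nr_pages_for(0));	/* 1 2 0 */
	return 0;
}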
-void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e, int is_net)
+void drbd_free_some_ee(struct drbd_conf *mdev, struct drbd_peer_request *e,
+ int is_net)
{
if (e->flags & EE_HAS_DIGEST)
kfree(e->digest);
int drbd_release_ee(struct drbd_conf *mdev, struct list_head *list)
{
LIST_HEAD(work_list);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *e, *t;
int count = 0;
int is_net = list == &mdev->net_ee;
{
LIST_HEAD(work_list);
LIST_HEAD(reclaimed);
- struct drbd_epoch_entry *e, *t;
+ struct drbd_peer_request *e, *t;
int ok = (mdev->state.conn >= C_WF_REPORT_PARAMS);
spin_lock_irq(&mdev->tconn->req_lock);
* on certain Xen deployments.
*/
/* TODO allocate from our own bio_set. */
-int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_epoch_entry *e,
- const unsigned rw, const int fault_type)
+int drbd_submit_ee(struct drbd_conf *mdev, struct drbd_peer_request *e,
+ const unsigned rw, const int fault_type)
{
struct bio *bios = NULL;
struct bio *bio;
}
static void drbd_remove_epoch_entry_interval(struct drbd_conf *mdev,
- struct drbd_epoch_entry *e)
+ struct drbd_peer_request *e)
{
struct drbd_interval *i = &e->i;
/* used from receive_RSDataReply (recv_resync_read)
* and from receive_Data */
-static struct drbd_epoch_entry *
-read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector, int data_size) __must_hold(local)
+static struct drbd_peer_request *
+read_in_block(struct drbd_conf *mdev, u64 id, sector_t sector,
+ int data_size) __must_hold(local)
{
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
struct page *page;
int dgs, ds, rr;
void *dig_in = mdev->tconn->int_dig_in;
* drbd_process_done_ee() by asender only */
static int e_end_resync_block(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
+ struct drbd_peer_request *e = (struct drbd_peer_request *)w;
sector_t sector = e->i.sector;
int ok;
static int recv_resync_read(struct drbd_conf *mdev, sector_t sector, int data_size) __releases(local)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
e = read_in_block(mdev, ID_SYNCER, sector, data_size);
if (!e)
*/
static int e_end_block(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
+ struct drbd_peer_request *e = (struct drbd_peer_request *)w;
sector_t sector = e->i.sector;
int ok = 1, pcmd;
static int e_send_discard_ack(struct drbd_conf *mdev, struct drbd_work *w, int unused)
{
- struct drbd_epoch_entry *e = (struct drbd_epoch_entry *)w;
+ struct drbd_peer_request *e = (struct drbd_peer_request *)w;
int ok = 1;
D_ASSERT(mdev->tconn->net_conf->wire_protocol == DRBD_PROT_C);
unsigned int data_size)
{
sector_t sector;
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
struct p_data *p = &mdev->tconn->data.rbuf.data;
int rw = WRITE;
u32 dp_flags;
{
sector_t sector;
const sector_t capacity = drbd_get_capacity(mdev->this_bdev);
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
struct digest_info *di = NULL;
int size, verb;
unsigned int fault_type;
/* reads on behalf of the partner,
* "submitted" by the receiver
*/
-void drbd_endio_read_sec_final(struct drbd_epoch_entry *e) __releases(local)
+void drbd_endio_read_sec_final(struct drbd_peer_request *e) __releases(local)
{
unsigned long flags = 0;
struct drbd_conf *mdev = e->mdev;
/* writes on behalf of the partner, or resync writes,
* "submitted" by the receiver, final stage. */
-static void drbd_endio_write_sec_final(struct drbd_epoch_entry *e) __releases(local)
+static void drbd_endio_write_sec_final(struct drbd_peer_request *e) __releases(local)
{
unsigned long flags = 0;
struct drbd_conf *mdev = e->mdev;
*/
void drbd_endio_sec(struct bio *bio, int error)
{
- struct drbd_epoch_entry *e = bio->bi_private;
+ struct drbd_peer_request *e = bio->bi_private;
struct drbd_conf *mdev = e->mdev;
int uptodate = bio_flagged(bio, BIO_UPTODATE);
int is_write = bio_data_dir(bio) == WRITE;
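
drbd_endio_sec() is the single completion handler for all peer-request bios: the peer request is recovered from bio->bi_private, and the direction of the bio decides whether the read or the write final-stage function runs. A schematic userspace sketch of that dispatch shape, with hypothetical types standing in for struct bio and the two handlers:

#include <stdio.h>
#include <stdbool.h>

enum dir { DIR_READ, DIR_WRITE };

struct request;				/* the "peer request" */

struct io {
	void *private;			/* cf. bio->bi_private */
	enum dir dir;			/* cf. bio_data_dir() */
	bool uptodate;			/* cf. BIO_UPTODATE */
};

static void read_final(struct request *r)  { printf("read done\n"); }
static void write_final(struct request *r) { printf("write done\n"); }

/* One completion entry point, split by direction (cf. drbd_endio_sec). */
static void endio(struct io *io, int error)
{
	struct request *r = io->private;

	if (error || !io->uptodate)
		printf("I/O failed: %d\n", error);
	if (io->dir == DIR_WRITE)
		write_final(r);
	else
		read_final(r);
}

int main(void)
{
	struct io w = { NULL, DIR_WRITE, true };
	struct io r = { NULL, DIR_READ, true };

	endio(&w, 0);
	endio(&r, 0);
	return 0;
}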
return w_send_read_req(mdev, w, 0);
}
-void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm, struct drbd_epoch_entry *e, void *digest)
+void drbd_csum_ee(struct drbd_conf *mdev, struct crypto_hash *tfm,
+ struct drbd_peer_request *e, void *digest)
{
struct hash_desc desc;
struct scatterlist sg;
/* TODO merge common code with w_e_end_ov_req */
int w_e_send_csum(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
int digest_size;
void *digest;
int ok = 1;
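
Two idioms appear in the callbacks touched by this patch for getting from the embedded struct drbd_work back to the enclosing peer request: a plain cast, as in e_end_resync_block() above, which is correct only because w is the first member of struct drbd_peer_request, and container_of(), as in w_e_send_csum(), which is independent of the member's position. A self-contained demonstration with hypothetical types:

#include <stdio.h>
#include <stddef.h>

#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct work { int dummy; };

struct peer_request {
	struct work w;		/* first member: makes the cast below legal */
	int id;
};

int main(void)
{
	struct peer_request req = { { 0 }, 42 };
	struct work *w = &req.w;

	/* Idiom 1: direct cast; valid only while w stays first. */
	struct peer_request *a = (struct peer_request *)w;
	/* Idiom 2: container_of(); independent of member position. */
	struct peer_request *b = container_of(w, struct peer_request, w);

	printf("%d %d\n", a->id, b->id);	/* 42 42 */
	return 0;
}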
static int read_for_csum(struct drbd_conf *mdev, sector_t sector, int size)
{
- struct drbd_epoch_entry *e;
+ struct drbd_peer_request *e;
if (!get_ldev(mdev))
return -EIO;
}
/* helper */
-static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_epoch_entry *e)
+static void move_to_net_ee_or_free(struct drbd_conf *mdev, struct drbd_peer_request *e)
{
if (drbd_ee_has_active_page(e)) {
/* This might happen if sendpage() has not finished */
*/
int w_e_end_data_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
int ok;
if (unlikely(cancel)) {
*/
int w_e_end_rsdata_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
int ok;
if (unlikely(cancel)) {
int w_e_end_csum_rs_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
struct digest_info *di;
int digest_size;
void *digest = NULL;
/* TODO merge common code with w_e_send_csum */
int w_e_end_ov_req(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
sector_t sector = e->i.sector;
unsigned int size = e->i.size;
int digest_size;
int w_e_end_ov_reply(struct drbd_conf *mdev, struct drbd_work *w, int cancel)
{
- struct drbd_epoch_entry *e = container_of(w, struct drbd_epoch_entry, w);
+ struct drbd_peer_request *e = container_of(w, struct drbd_peer_request, w);
struct digest_info *di;
void *digest;
sector_t sector = e->i.sector;