xfs_buftarg_t *log_target,
xfs_daddr_t blk_offset,
int num_bblks);
-STATIC int xlog_space_left(struct log *log, int64_t *head);
+STATIC int xlog_space_left(struct log *log, atomic64_t *head);
STATIC int xlog_sync(xlog_t *log, xlog_in_core_t *iclog);
STATIC void xlog_dealloc_log(xlog_t *log);
static void
xlog_grant_sub_space(
struct log *log,
- int64_t *head,
+ atomic64_t *head,
int bytes)
{
int cycle, space;
static void
xlog_grant_add_space(
struct log *log,
- int64_t *head,
+ atomic64_t *head,
int bytes)
{
int tmp;
STATIC int
xlog_space_left(
struct log *log,
- int64_t *head)
+ atomic64_t *head)
{
int free_bytes;
int tail_bytes;
spinlock_t l_grant_lock ____cacheline_aligned_in_smp;
struct list_head l_reserveq;
struct list_head l_writeq;
- int64_t l_grant_reserve_head;
- int64_t l_grant_write_head;
+ atomic64_t l_grant_reserve_head;
+ atomic64_t l_grant_write_head;
/*
 * l_last_sync_lsn and l_tail_lsn are atomics so they can be set and
 * read without holding the grant lock, and so readers
 * will always get consistent component values to work from.
*/
/*
 * Split a packed 64-bit grant head into its components: the cycle
 * number lives in the upper 32 bits, the byte count (space) in the
 * lower 32 bits.  The head becomes an atomic64_t so the combined
 * value can be sampled in one atomic64_read() and the two components
 * are always mutually consistent.
 */
static inline void
-xlog_crack_grant_head(int64_t *head, int *cycle, int *space)
+xlog_crack_grant_head(atomic64_t *head, int *cycle, int *space)
{
-	int64_t val = *head;
+	int64_t val = atomic64_read(head);
*cycle = val >> 32;
*space = val & 0xffffffff;
}
/*
 * Pack a (cycle, space) pair into a grant head: cycle in the upper
 * 32 bits, space in the lower 32.  atomic64_set() publishes both
 * components in a single store so concurrent readers never see a
 * torn cycle/space combination.
 */
static inline void
-xlog_assign_grant_head(int64_t *head, int cycle, int space)
+xlog_assign_grant_head(atomic64_t *head, int cycle, int space)
{
-	*head = ((int64_t)cycle << 32) | space;
+	atomic64_set(head, ((int64_t)cycle << 32) | space);
}
/*