* can be added at commit step.
*/
struct hist_lock {
+ /*
+ * Id for each entry in the ring buffer. This is used to
+ * decide whether the ring buffer was overwritten or not.
+ *
+ * For example,
+ *
+ * |<----------- hist_lock ring buffer size ------->|
+ * pppppppppppppppppppppiiiiiiiiiiiiiiiiiiiiiiiiiiiii
+ * wrapped > iiiiiiiiiiiiiiiiiiiiiiiiiii.......................
+ *
+ * where 'p' represents an acquisition in process
+ * context, 'i' represents an acquisition in irq
+ * context.
+ *
+ * In this example, the ring buffer was overwritten by
+ * acquisitions in irq context, which should be detected on
+ * rollback or commit.
+ */
+ unsigned int hist_id;
+
/*
* Separate stack_trace data. This will be used at the commit step.
*/
*/
static atomic_t cross_gen_id; /* Can be wrapped */
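
The xhlock() accessor used below is not part of this excerpt. Given the ring-buffer layout described in the hist_lock comment above, a minimal sketch of it (assuming the per-task array is sized MAX_XHLOCKS_NR) is just a modulo wrap of the running index:

	#define xhlock(i)	(current->xhlocks[(i) % MAX_XHLOCKS_NR])
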
+/*
+ * Make an entry of the ring buffer invalid.
+ */
+static inline void invalidate_xhlock(struct hist_lock *xhlock)
+{
+ /*
+ * Normally, xhlock->hlock.instance must be !NULL.
+ */
+ xhlock->hlock.instance = NULL;
+}
+
/*
* Lock history stacks; we have 3 nested lock history stacks:
*
*/
void crossrelease_hist_start(enum xhlock_context_t c)
{
- if (current->xhlocks)
- current->xhlock_idx_hist[c] = current->xhlock_idx;
+ struct task_struct *cur = current;
+
+ if (cur->xhlocks) {
+ cur->xhlock_idx_hist[c] = cur->xhlock_idx;
+ cur->hist_id_save[c] = cur->hist_id;
+ }
}
void crossrelease_hist_end(enum xhlock_context_t c)
{
- if (current->xhlocks)
- current->xhlock_idx = current->xhlock_idx_hist[c];
+ struct task_struct *cur = current;
+
+ if (cur->xhlocks) {
+ unsigned int idx = cur->xhlock_idx_hist[c];
+ struct hist_lock *h = &xhlock(idx);
+
+ cur->xhlock_idx = idx;
+
+ /* Check if the ring was overwritten. */
+ if (h->hist_id != cur->hist_id_save[c])
+ invalidate_xhlock(h);
+ }
}
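
Taken together, crossrelease_hist_start()/crossrelease_hist_end() bracket a nested context: the index and hist_id are saved on entry, and on exit the index is rolled back while the hist_id comparison catches a ring wraparound that happened in between. A hypothetical caller, assuming enum xhlock_context_t provides a value such as XHLOCK_SOFT for softirq sections, might look like:

	static void run_softirq_section(void (*fn)(void *), void *arg)
	{
		/* Save the history cursor and overwrite-detection id. */
		crossrelease_hist_start(XHLOCK_SOFT);
		fn(arg);
		/* Roll back; invalidate the saved slot if the ring wrapped. */
		crossrelease_hist_end(XHLOCK_SOFT);
	}
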
static int cross_lock(struct lockdep_map *lock)
* Check if the xhlock is valid, which would be false if,
*
* 1. Has not been used after initialization yet.
+ * 2. Got invalidated.
*
* Remember that hist_lock is implemented as a ring buffer.
*/
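
The body of this validity check is elided from the hunk. Pairing it with invalidate_xhlock() above, it presumably reduces to a NULL test on the instance pointer, roughly:

	static inline int xhlock_valid(struct hist_lock *xhlock)
	{
		/*
		 * Unused entries are zero-initialized (the array is
		 * kzalloc'ed) and invalidated entries had their instance
		 * cleared, so a non-NULL instance means the entry is valid.
		 */
		return !!xhlock->hlock.instance;
	}
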
/* Initialize hist_lock's members */
xhlock->hlock = *hlock;
+ xhlock->hist_id = current->hist_id++;
xhlock->trace.nr_entries = 0;
xhlock->trace.max_entries = MAX_XHLOCK_TRACE_ENTRIES;
static void commit_xhlocks(struct cross_lock *xlock)
{
unsigned int cur = current->xhlock_idx;
+ unsigned int prev_hist_id = xhlock(cur).hist_id;
unsigned int i;
if (!graph_lock())
if (!same_context_xhlock(xhlock))
break;
+ /*
+ * Filter out the cases where the ring buffer was
+ * overwritten: a previous entry carrying a bigger
+ * hist_id than the one that follows it is impossible
+ * unless the buffer wrapped. (See the before() sketch
+ * after this hunk.)
+ */
+ if (unlikely(before(xhlock->hist_id, prev_hist_id)))
+ break;
+
+ prev_hist_id = xhlock->hist_id;
+
/*
* commit_xhlock() returns 0 with graph_lock already
* released if it fails.
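
The before() helper used in the overwrite filter above is not shown in this excerpt. Since hist_id is a free-running unsigned counter that may wrap, the comparison presumably follows the usual wrap-safe idiom (as in the TCP sequence-number helpers), e.g.:

	/* Wrap-safe test: does id a come before id b? */
	static inline int before(unsigned int a, unsigned int b)
	{
		return (int)(a - b) < 0;
	}
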
int i;
task->xhlock_idx = UINT_MAX;
+ task->hist_id = 0;
- for (i = 0; i < XHLOCK_CTX_NR; i++)
+ for (i = 0; i < XHLOCK_CTX_NR; i++) {
task->xhlock_idx_hist[i] = UINT_MAX;
+ task->hist_id_save[i] = 0;
+ }
task->xhlocks = kzalloc(sizeof(struct hist_lock) * MAX_XHLOCKS_NR,
GFP_KERNEL);