int *pmu_disable_count;
- void (*pmu_enable) (struct pmu *pmu);
- void (*pmu_disable) (struct pmu *pmu);
+ void (*pmu_enable) (struct pmu *pmu); /* optional */
+ void (*pmu_disable) (struct pmu *pmu); /* optional */
	/*
	 * Start the transaction, after this ->enable() doesn't need to
	 * do schedulability tests.
	 */
- void (*start_txn) (struct pmu *pmu);
+ void (*start_txn) (struct pmu *pmu); /* optional */
/*
* If ->start_txn() disabled the ->enable() schedulability test
* then ->commit_txn() is required to perform one. On success
* the transaction is closed. On error the transaction is kept
* open until ->cancel_txn() is called.
*/
- int (*commit_txn) (struct pmu *pmu);
+ int (*commit_txn) (struct pmu *pmu); /* optional */
	/*
	 * Will cancel the transaction; assumes ->disable() is called
	 * for each successful ->enable() during the transaction.
	 */
- void (*cancel_txn) (struct pmu *pmu);
+ void (*cancel_txn) (struct pmu *pmu); /* optional */
};
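For illustration, a driver that implements real group transactions would wire these hooks up roughly as follows. This is a sketch only; foo_pmu_have_room() and the foo_ naming are hypothetical stand-ins for whatever schedulability test the hardware needs, not part of this patch:

static void foo_pmu_start_txn(struct pmu *pmu)
{
	/* Quiesce the PMU; subsequent ->enable() calls may skip their tests. */
	perf_pmu_disable(pmu);
}

static int foo_pmu_commit_txn(struct pmu *pmu)
{
	/* The deferred schedulability test, done once for the whole group. */
	if (!foo_pmu_have_room())		/* hypothetical helper */
		return -EAGAIN;			/* transaction stays open */

	perf_pmu_enable(pmu);			/* success closes the transaction */
	return 0;
}

static void foo_pmu_cancel_txn(struct pmu *pmu)
{
	/* The caller has ->disable()'d every ->enable()'d event by now. */
	perf_pmu_enable(pmu);
}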
static int
group_sched_in(struct perf_event *group_event,
	       struct perf_cpu_context *cpuctx,
	       struct perf_event_context *ctx)
{
struct perf_event *event, *partial_group = NULL;
struct pmu *pmu = group_event->pmu;
- bool txn = false;
if (group_event->state == PERF_EVENT_STATE_OFF)
return 0;
- /* Check if group transaction availabe */
- if (pmu->start_txn)
- txn = true;
-
- if (txn)
- pmu->start_txn(pmu);
+ pmu->start_txn(pmu);
if (event_sched_in(group_event, cpuctx, ctx)) {
- if (txn)
- pmu->cancel_txn(pmu);
+ pmu->cancel_txn(pmu);
return -EAGAIN;
}
	/*
	 * Schedule in siblings as one group (if any):
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event_sched_in(event, cpuctx, ctx)) {
			partial_group = event;
			goto group_error;
		}
	}
- if (!txn || !pmu->commit_txn(pmu))
+ if (!pmu->commit_txn(pmu))
return 0;
group_error:
	/*
	 * Groups can be scheduled in as one unit only, so undo any
	 * partial group before returning:
	 */
	list_for_each_entry(event, &group_event->sibling_list, group_entry) {
		if (event == partial_group)
			break;
		event_sched_out(event, cpuctx, ctx);
	}
event_sched_out(group_event, cpuctx, ctx);
- if (txn)
- pmu->cancel_txn(pmu);
+ pmu->cancel_txn(pmu);
return -EAGAIN;
}
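Note that group_sched_in() now calls the transaction hooks unconditionally; perf_pmu_register() below guarantees they are never NULL. For a PMU that provides pmu_enable()/pmu_disable() but no transaction ops of its own, the default stubs reduce the sequence above to roughly the following (an illustrative sketch, not code from this patch):

static int sketch_effective_txn(struct pmu *pmu)
{
	perf_pmu_disable(pmu);		/* perf_pmu_start_txn() */
	/* ... event_sched_in() the leader and each sibling ... */
	perf_pmu_enable(pmu);		/* perf_pmu_commit_txn() */
	return 0;			/* the stub commit always succeeds */
}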
static DEFINE_MUTEX(pmus_lock);
static struct srcu_struct pmus_srcu;
+static void perf_pmu_nop_void(struct pmu *pmu)
+{
+}
+
+static int perf_pmu_nop_int(struct pmu *pmu)
+{
+ return 0;
+}
+
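+/*
+ * Default transaction helpers for PMUs that supply pmu_enable()/pmu_disable()
+ * but no transaction ops of their own: keep the PMU disabled for the whole
+ * transaction so the hardware accesses can be batched into the final
+ * pmu_enable().
+ */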
+static void perf_pmu_start_txn(struct pmu *pmu)
+{
+ perf_pmu_disable(pmu);
+}
+
+static int perf_pmu_commit_txn(struct pmu *pmu)
+{
+ perf_pmu_enable(pmu);
+ return 0;
+}
+
+static void perf_pmu_cancel_txn(struct pmu *pmu)
+{
+ perf_pmu_enable(pmu);
+}
+
int perf_pmu_register(struct pmu *pmu)
{
int ret;
	ret = -ENOMEM;
	pmu->pmu_disable_count = alloc_percpu(int);
	if (!pmu->pmu_disable_count)
		goto unlock;
+
+ if (!pmu->start_txn) {
+ if (pmu->pmu_enable) {
+ /*
+ * If we have pmu_enable/pmu_disable calls, install
+ * transaction stubs that use that to try and batch
+ * hardware accesses.
+ */
+ pmu->start_txn = perf_pmu_start_txn;
+ pmu->commit_txn = perf_pmu_commit_txn;
+ pmu->cancel_txn = perf_pmu_cancel_txn;
+ } else {
+ pmu->start_txn = perf_pmu_nop_void;
+ pmu->commit_txn = perf_pmu_nop_int;
+ pmu->cancel_txn = perf_pmu_nop_void;
+ }
+ }
+
+ if (!pmu->pmu_enable) {
+ pmu->pmu_enable = perf_pmu_nop_void;
+ pmu->pmu_disable = perf_pmu_nop_void;
+ }
+
list_add_rcu(&pmu->entry, &pmus);
ret = 0;
unlock:
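With these defaults installed, a minimal PMU (a purely software one, say) can leave all five hooks NULL and let perf_pmu_register() fill them in. A hypothetical registration; foo_pmu_event_init and the event_init member as used here are assumptions, not shown in this excerpt:

static struct pmu foo_pmu = {
	/*
	 * pmu_enable/pmu_disable and the three transaction hooks are left
	 * NULL on purpose; perf_pmu_register() installs the nop defaults.
	 */
	.event_init	= foo_pmu_event_init,	/* hypothetical callback */
};

static int __init foo_pmu_init(void)
{
	return perf_pmu_register(&foo_pmu);
}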