static DEFINE_MUTEX(pasid_mutex);
-int intel_svm_bind_mm(struct device *dev, int *pasid)
+int intel_svm_bind_mm(struct device *dev, int *pasid, int flags, struct svm_dev_ops *ops)
{
struct intel_iommu *iommu = intel_svm_device_to_iommu(dev);
struct intel_svm_dev *sdev;
list_for_each_entry(sdev, &svm->devs, list) {
if (dev == sdev->dev) {
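+ /* The same device must not be bound again with different ops */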
+ if (sdev->ops != ops) {
+ ret = -EBUSY;
+ goto out;
+ }
sdev->users++;
goto success;
}
}
/* Finish the setup now we know we're keeping it */
sdev->users = 1;
+ sdev->ops = ops;
init_rcu_head(&sdev->rcu);
if (!svm) {
tail = dmar_readq(iommu->reg + DMAR_PQT_REG) & PRQ_RING_MASK;
head = dmar_readq(iommu->reg + DMAR_PQH_REG) & PRQ_RING_MASK;
while (head != tail) {
+ struct intel_svm_dev *sdev;
struct vm_area_struct *vma;
struct page_req_dsc *req;
struct qi_desc resp;
up_read(&svm->mm->mmap_sem);
bad_req:
/* Accounting for major/minor faults? */
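+ /* Find the device that issued this request by its source-ID */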
+ rcu_read_lock();
+ list_for_each_entry_rcu(sdev, &svm->devs, list) {
+ if (sdev->sid == PCI_DEVID(req->bus, req->devfn))
+ break;
+ }
+ /* Other devices can go away, but the drivers are not permitted
+ * to unbind while any page faults might be in flight. So it's
+ * OK to drop the 'lock' here now that we have it. */
+ rcu_read_unlock();
+
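+ /* No match found: the loop cursor ran off the end of the list */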
+ if (WARN_ON(&sdev->list == &svm->devs))
+ sdev = NULL;
+
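+ /* If the driver registered a fault callback, report this request */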
+ if (sdev && sdev->ops && sdev->ops->fault_cb) {
+ int rwxp = (req->rd_req << 3) | (req->wr_req << 2) |
+ (req->exe_req << 1) | (req->priv_req);
+ sdev->ops->fault_cb(sdev->dev, req->pasid, req->addr, req->private, rwxp, result);
+ }
if (req->lpig) {
/* Page Group Response */
struct device;
+struct svm_dev_ops {
+ void (*fault_cb)(struct device *dev, int pasid, u64 address,
+ u32 private, int rwxp, int response);
+};
+
+/* Values for rwxp in fault_cb callback */
+#define SVM_REQ_READ (1<<3)
+#define SVM_REQ_WRITE (1<<2)
+#define SVM_REQ_EXEC (1<<1)
+#define SVM_REQ_PRIV (1<<0)
+
/**
* intel_svm_bind_mm() - Bind the current process to a PASID
 * @dev: Device to be granted access
* @pasid: Address for allocated PASID
+ * @flags: Flags. Later for requesting supervisor mode, etc.
+ * @ops: Callbacks to device driver
*
* This function attempts to enable PASID support for the given device.
 * If the @pasid argument is non-%NULL, a PASID is allocated for access
 * to the MM of the current process.
 *
 * Multiple calls from the same process may result in the same PASID
* being re-used. A reference count is kept.
*/
-extern int intel_svm_bind_mm(struct device *dev, int *pasid);
+extern int intel_svm_bind_mm(struct device *dev, int *pasid, int flags,
+ struct svm_dev_ops *ops);
/**
* intel_svm_unbind_mm() - Unbind a specified PASID
#else /* CONFIG_INTEL_IOMMU_SVM */
-static inline int intel_svm_bind_mm(struct device *dev, int *pasid)
+static inline int intel_svm_bind_mm(struct device *dev, int *pasid,
+ int flags, struct svm_dev_ops *ops)
{
return -ENOSYS;
}
}
#endif /* CONFIG_INTEL_IOMMU_SVM */
-#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL))
+#define intel_svm_available(dev) (!intel_svm_bind_mm((dev), NULL, 0, NULL))
#endif /* __INTEL_SVM_H__ */
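
For illustration only, a minimal sketch of how a device driver might consume
this API. The driver names and the callback body below are hypothetical, not
part of this patch; only intel_svm_bind_mm(), struct svm_dev_ops and the
SVM_REQ_* flags come from the code above.

/* Hypothetical consumer of the SVM bind API -- assumes this patch applied */
#include <linux/device.h>
#include <linux/intel-svm.h>

static void my_fault_cb(struct device *dev, int pasid, u64 address,
			u32 private, int rwxp, int response)
{
	/* Decode the access bits using the SVM_REQ_* values defined above */
	dev_dbg(dev, "PASID %d fault at %llx [%c%c%c%c], response %d\n",
		pasid, address,
		(rwxp & SVM_REQ_READ)  ? 'r' : '-',
		(rwxp & SVM_REQ_WRITE) ? 'w' : '-',
		(rwxp & SVM_REQ_EXEC)  ? 'x' : '-',
		(rwxp & SVM_REQ_PRIV)  ? 'p' : '-',
		response);
}

static struct svm_dev_ops my_svm_ops = {
	.fault_cb = my_fault_cb,
};

static int my_bind(struct device *dev)
{
	int pasid, ret;

	/* Bind the current process's address space; no flags defined yet */
	ret = intel_svm_bind_mm(dev, &pasid, 0, &my_svm_ops);
	if (ret)
		return ret;

	/* ... program 'pasid' into the device's work submission path ... */
	return 0;
}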