if (nfit_res)
return (void __iomem *) nfit_res->buf + offset
- - nfit_res->res->start;
+ - nfit_res->res.start;
return fallback_fn(offset, size);
}
if (nfit_res)
return (void __iomem *) nfit_res->buf + offset
- - nfit_res->res->start;
+ - nfit_res->res.start;
return devm_ioremap_nocache(dev, offset, size);
}
EXPORT_SYMBOL(__wrap_devm_ioremap_nocache);
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
if (nfit_res)
- return nfit_res->buf + offset - nfit_res->res->start;
+ return nfit_res->buf + offset - nfit_res->res.start;
return devm_memremap(dev, offset, size, flags);
}
EXPORT_SYMBOL(__wrap_devm_memremap);
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
if (nfit_res)
- return nfit_res->buf + offset - nfit_res->res->start;
+ return nfit_res->buf + offset - nfit_res->res.start;
return devm_memremap_pages(dev, res, ref, altmap);
}
EXPORT_SYMBOL(__wrap_devm_memremap_pages);
struct nfit_test_resource *nfit_res = get_nfit_res(offset);
if (nfit_res)
- return nfit_res->buf + offset - nfit_res->res->start;
+ return nfit_res->buf + offset - nfit_res->res.start;
return memremap(offset, size, flags);
}
EXPORT_SYMBOL(__wrap_memremap);
}
EXPORT_SYMBOL(__wrap_memunmap);
+static bool nfit_test_release_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n);
+
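+/*
+ * devres release action for emulated regions: runs at device teardown (or
+ * when __wrap___devm_release_region() triggers devres_release()) and drops
+ * the tracked request via nfit_test_release_region().
+ */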
+static void nfit_devres_release(struct device *dev, void *data)
+{
+ struct resource *res = *((struct resource **) data);
+
+ WARN_ON(!nfit_test_release_region(NULL, &iomem_resource, res->start,
+ resource_size(res)));
+}
+
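+/* devres match callback: select the entry whose tracked resource starts at *match_data */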
+static int match(struct device *dev, void *__res, void *match_data)
+{
+ struct resource *res = *((struct resource **) __res);
+ resource_size_t start = *((resource_size_t *) match_data);
+
+ return res->start == start;
+}
+
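+/*
+ * Release an emulated (nfit_test) region.  Returns true if the range was
+ * handled here, false if the caller should fall back to the real
+ * __release_region() / __devm_release_region().
+ */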
+static bool nfit_test_release_region(struct device *dev,
+ struct resource *parent, resource_size_t start,
+ resource_size_t n)
+{
+ if (parent == &iomem_resource) {
+ struct nfit_test_resource *nfit_res = get_nfit_res(start);
+
+ if (nfit_res) {
+ struct nfit_test_request *req;
+ struct resource *res = NULL;
+
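+ /* devm path: let devres find the tracked entry; nfit_devres_release() re-enters here with dev == NULL */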
+ if (dev) {
+ devres_release(dev, nfit_devres_release, match,
+ &start);
+ return true;
+ }
+
+ spin_lock(&nfit_res->lock);
+ list_for_each_entry(req, &nfit_res->requests, list)
+ if (req->res.start == start) {
+ res = &req->res;
+ list_del(&req->list);
+ break;
+ }
+ spin_unlock(&nfit_res->lock);
+
+ WARN(!res || resource_size(res) != n,
+ "%s: start: %llx n: %llx mismatch: %pr\n",
+ __func__, start, n, res);
+ if (res)
+ kfree(req);
+ return true;
+ }
+ }
+ return false;
+}
+
static struct resource *nfit_test_request_region(struct device *dev,
struct resource *parent, resource_size_t start,
resource_size_t n, const char *name, int flags)
if (parent == &iomem_resource) {
nfit_res = get_nfit_res(start);
if (nfit_res) {
- struct resource *res = nfit_res->res + 1;
+ struct nfit_test_request *req;
+ struct resource *res = NULL;
- if (start + n > nfit_res->res->start
- + resource_size(nfit_res->res)) {
+ if (start + n > nfit_res->res.start
+ + resource_size(&nfit_res->res)) {
pr_debug("%s: start: %llx n: %llx overflow: %pr\n",
__func__, start, n,
- nfit_res->res);
+ &nfit_res->res);
+ return NULL;
+ }
+
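+ /* look for an outstanding request at this start address */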
+ spin_lock(&nfit_res->lock);
+ list_for_each_entry(req, &nfit_res->requests, list)
+ if (start == req->res.start) {
+ res = &req->res;
+ break;
+ }
+ spin_unlock(&nfit_res->lock);
+
+ if (res) {
+ WARN(1, "%pr already busy\n", res);
return NULL;
}
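+ /* record the new request so a later release can find and free it */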
+ req = kzalloc(sizeof(*req), GFP_KERNEL);
+ if (!req)
+ return NULL;
+ INIT_LIST_HEAD(&req->list);
+ res = &req->res;
+
res->start = start;
res->end = start + n - 1;
res->name = name;
res->flags = resource_type(parent);
res->flags |= IORESOURCE_BUSY | flags;
+ spin_lock(&nfit_res->lock);
+ list_add(&req->list, &nfit_res->requests);
+ spin_unlock(&nfit_res->lock);
+
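+ /* for devm-managed requests, add a devres entry so the region is dropped on device teardown */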
+ if (dev) {
+ struct resource **d;
+
+ d = devres_alloc(nfit_devres_release,
+ sizeof(struct resource *),
+ GFP_KERNEL);
+ if (!d)
+ return NULL;
+ *d = res;
+ devres_add(dev, d);
+ }
+
pr_debug("%s: %pr\n", __func__, res);
return res;
}
}
EXPORT_SYMBOL(__wrap___devm_request_region);
-static bool nfit_test_release_region(struct resource *parent,
- resource_size_t start, resource_size_t n)
-{
- if (parent == &iomem_resource) {
- struct nfit_test_resource *nfit_res = get_nfit_res(start);
- if (nfit_res) {
- struct resource *res = nfit_res->res + 1;
-
- if (start != res->start || resource_size(res) != n)
- pr_info("%s: start: %llx n: %llx mismatch: %pr\n",
- __func__, start, n, res);
- else
- memset(res, 0, sizeof(*res));
- return true;
- }
- }
- return false;
-}
-
void __wrap___release_region(struct resource *parent, resource_size_t start,
resource_size_t n)
{
- if (!nfit_test_release_region(parent, start, n))
+ if (!nfit_test_release_region(NULL, parent, start, n))
__release_region(parent, start, n);
}
EXPORT_SYMBOL(__wrap___release_region);
void __wrap___devm_release_region(struct device *dev, struct resource *parent,
resource_size_t start, resource_size_t n)
{
- if (!nfit_test_release_region(parent, start, n))
+ if (!nfit_test_release_region(dev, parent, start, n))
__devm_release_region(dev, parent, start, n);
}
EXPORT_SYMBOL(__wrap___devm_release_region);
static void release_nfit_res(void *data)
{
struct nfit_test_resource *nfit_res = data;
- struct resource *res = nfit_res->res;
spin_lock(&nfit_test_lock);
list_del(&nfit_res->list);
spin_unlock(&nfit_test_lock);
vfree(nfit_res->buf);
- kfree(res);
kfree(nfit_res);
}
void *buf)
{
struct device *dev = &t->pdev.dev;
- struct resource *res = kzalloc(sizeof(*res) * 2, GFP_KERNEL);
struct nfit_test_resource *nfit_res = kzalloc(sizeof(*nfit_res),
GFP_KERNEL);
int rc;
- if (!res || !buf || !nfit_res)
+ if (!buf || !nfit_res)
goto err;
rc = devm_add_action(dev, release_nfit_res, nfit_res);
if (rc)
memset(buf, 0, size);
nfit_res->dev = dev;
nfit_res->buf = buf;
- nfit_res->res = res;
- res->start = *dma;
- res->end = *dma + size - 1;
- res->name = "NFIT";
+ nfit_res->res.start = *dma;
+ nfit_res->res.end = *dma + size - 1;
+ nfit_res->res.name = "NFIT";
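+ /* bookkeeping for emulated request_region()/release_region() calls */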
+ spin_lock_init(&nfit_res->lock);
+ INIT_LIST_HEAD(&nfit_res->requests);
spin_lock(&nfit_test_lock);
list_add(&nfit_res->list, &t->resources);
spin_unlock(&nfit_test_lock);
err:
if (buf)
vfree(buf);
- kfree(res);
kfree(nfit_res);
return NULL;
}
continue;
spin_lock(&nfit_test_lock);
list_for_each_entry(n, &t->resources, list) {
- if (addr >= n->res->start && (addr < n->res->start
- + resource_size(n->res))) {
+ if (addr >= n->res.start && (addr < n->res.start
+ + resource_size(&n->res))) {
nfit_res = n;
break;
} else if (addr >= (unsigned long) n->buf
&& (addr < (unsigned long) n->buf
- + resource_size(n->res))) {
+ + resource_size(&n->res))) {
nfit_res = n;
break;
}