#include <linux/list.h>
#include <linux/acpi.h>
#include <linux/sort.h>
+#include <linux/pmem.h>
#include <linux/io.h>
#include "nfit.h"
return true;
}
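+/* parse a flush address table entry and queue it on acpi_desc->flushes */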
+static bool add_flush(struct acpi_nfit_desc *acpi_desc,
+ struct acpi_nfit_flush_address *flush)
+{
+ struct device *dev = acpi_desc->dev;
+ struct nfit_flush *nfit_flush = devm_kzalloc(dev, sizeof(*nfit_flush),
+ GFP_KERNEL);
+
+ if (!nfit_flush)
+ return false;
+ INIT_LIST_HEAD(&nfit_flush->list);
+ nfit_flush->flush = flush;
+ list_add_tail(&nfit_flush->list, &acpi_desc->flushes);
+ dev_dbg(dev, "%s: nfit_flush handle: %d hint_count: %d\n", __func__,
+ flush->device_handle, flush->hint_count);
+ return true;
+}
+
static void *add_table(struct acpi_nfit_desc *acpi_desc, void *table,
const void *end)
{
return err;
break;
case ACPI_NFIT_TYPE_FLUSH_ADDRESS:
- dev_dbg(dev, "%s: flush\n", __func__);
+ if (!add_flush(acpi_desc, table))
+ return err;
break;
case ACPI_NFIT_TYPE_SMBIOS:
dev_dbg(dev, "%s: smbios\n", __func__);
{
u16 dcr = __to_nfit_memdev(nfit_mem)->region_index;
struct nfit_memdev *nfit_memdev;
+ struct nfit_flush *nfit_flush;
struct nfit_dcr *nfit_dcr;
struct nfit_bdw *nfit_bdw;
struct nfit_idt *nfit_idt;
nfit_mem->idt_bdw = nfit_idt->idt;
break;
}
+
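+		/* find the flush hint entry matching this dimm's device handle */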
+ list_for_each_entry(nfit_flush, &acpi_desc->flushes, list) {
+ if (nfit_flush->flush->device_handle !=
+ nfit_memdev->memdev->device_handle)
+ continue;
+ nfit_mem->nfit_flush = nfit_flush;
+ break;
+ }
break;
}
return mmio->base_offset + line_offset + table_offset + sub_line_offset;
}
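+/* flush previous writes to the nvdimm via its flush hint, else use wmb_pmem() */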
+static void wmb_blk(struct nfit_blk *nfit_blk)
+{
+ if (nfit_blk->nvdimm_flush) {
+ /*
+ * The first wmb() is needed to 'sfence' all previous writes
+ * such that they are architecturally visible for the platform
+ * buffer flush. Note that we've already arranged for pmem
+ * writes to avoid the cache via arch_memcpy_to_pmem(). The
+ * final wmb() ensures ordering for the NVDIMM flush write.
+ */
+ wmb();
+ writeq(1, nfit_blk->nvdimm_flush);
+ wmb();
+	} else {
+		wmb_pmem();
+	}
+}
+
static u64 read_blk_stat(struct nfit_blk *nfit_blk, unsigned int bw)
{
struct nfit_blk_mmio *mmio = &nfit_blk->mmio[DCR];
offset = to_interleave_offset(offset, mmio);
writeq(cmd, mmio->base + offset);
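+	/* flush the control register programming through any platform buffers */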
+ wmb_blk(nfit_blk);
/* FIXME: conditionally perform read-back if mandated by firmware */
}
base_offset = nfit_blk->bdw_offset + dpa % L1_CACHE_BYTES
+ lane * mmio->size;
- /* TODO: non-temporal access, flush hints, cache management etc... */
write_blk_ctl(nfit_blk, lane, dpa, len, rw);
while (len) {
unsigned int c;
}
if (rw)
- memcpy(mmio->aperture + offset, iobuf + copied, c);
+ memcpy_to_pmem(mmio->aperture + offset,
+ iobuf + copied, c);
else
- memcpy(iobuf + copied, mmio->aperture + offset, c);
+ memcpy_from_pmem(iobuf + copied,
+ mmio->aperture + offset, c);
copied += c;
len -= c;
}
+
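+	/* for writes, flush aperture data to the dimm before reading status */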
+ if (rw)
+ wmb_blk(nfit_blk);
+
rc = read_blk_stat(nfit_blk, lane) ? -EIO : 0;
return rc;
}
}
static void __iomem *__nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
resource_size_t start = spa->address;
resource_size_t n = spa->length;
if (!res)
goto err_mem;
- /* TODO: cacheability based on the spa type */
- spa_map->iomem = ioremap_nocache(start, n);
+ if (type == SPA_MAP_APERTURE) {
+ /*
+ * TODO: memremap_pmem() support, but that requires cache
+ * flushing when the aperture is moved.
+ */
+ spa_map->iomem = ioremap_wc(start, n);
+	} else {
+		spa_map->iomem = ioremap_nocache(start, n);
+	}
+
if (!spa_map->iomem)
goto err_map;
* nfit_spa_map - interleave-aware managed-mappings of acpi_nfit_system_address ranges
* @nvdimm_bus: NFIT-bus that provided the spa table entry
* @nfit_spa: spa table to map
+ * @type: aperture or control region
*
* In the case where block-data-window apertures and
* dimm-control-regions are interleaved they will end up sharing a
* unbound.
*/
static void __iomem *nfit_spa_map(struct acpi_nfit_desc *acpi_desc,
- struct acpi_nfit_system_address *spa)
+ struct acpi_nfit_system_address *spa, enum spa_map_type type)
{
void __iomem *iomem;
mutex_lock(&acpi_desc->spa_map_mutex);
- iomem = __nfit_spa_map(acpi_desc, spa);
+ iomem = __nfit_spa_map(acpi_desc, spa, type);
mutex_unlock(&acpi_desc->spa_map_mutex);
return iomem;
struct nvdimm_bus_descriptor *nd_desc = to_nd_desc(nvdimm_bus);
struct acpi_nfit_desc *acpi_desc = to_acpi_desc(nd_desc);
struct nd_blk_region *ndbr = to_nd_blk_region(dev);
+ struct nfit_flush *nfit_flush;
struct nfit_blk_mmio *mmio;
struct nfit_blk *nfit_blk;
struct nfit_mem *nfit_mem;
/* map block aperture memory */
nfit_blk->bdw_offset = nfit_mem->bdw->offset;
mmio = &nfit_blk->mmio[BDW];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw);
+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_bdw,
+ SPA_MAP_APERTURE);
if (!mmio->base) {
dev_dbg(dev, "%s: %s failed to map bdw\n", __func__,
nvdimm_name(nvdimm));
nfit_blk->cmd_offset = nfit_mem->dcr->command_offset;
nfit_blk->stat_offset = nfit_mem->dcr->status_offset;
mmio = &nfit_blk->mmio[DCR];
- mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr);
+ mmio->base = nfit_spa_map(acpi_desc, nfit_mem->spa_dcr,
+ SPA_MAP_CONTROL);
if (!mmio->base) {
dev_dbg(dev, "%s: %s failed to map dcr\n", __func__,
nvdimm_name(nvdimm));
return rc;
}
+ nfit_flush = nfit_mem->nfit_flush;
+ if (nfit_flush && nfit_flush->flush->hint_count != 0) {
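+		/* only the first flush hint address is needed by wmb_blk() */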
+ nfit_blk->nvdimm_flush = devm_ioremap_nocache(dev,
+ nfit_flush->flush->hint_address[0], 8);
+ if (!nfit_blk->nvdimm_flush)
+ return -ENOMEM;
+ }
+
+ if (!arch_has_pmem_api() && !nfit_blk->nvdimm_flush)
+ dev_warn(dev, "unable to guarantee persistence of writes\n");
+
if (mmio->line_size == 0)
return 0;
INIT_LIST_HEAD(&acpi_desc->dcrs);
INIT_LIST_HEAD(&acpi_desc->bdws);
INIT_LIST_HEAD(&acpi_desc->idts);
+ INIT_LIST_HEAD(&acpi_desc->flushes);
INIT_LIST_HEAD(&acpi_desc->memdevs);
INIT_LIST_HEAD(&acpi_desc->dimms);
mutex_init(&acpi_desc->spa_map_mutex);