Merge branch 'mailbox-for-next' of git://git.linaro.org/landing-teams/working/fujitsu...
[deliverable/linux.git] / drivers / nvdimm / region_devs.c
index 67022f74febc9f49467a35f0a5026adcbcdc39f3..e8d5ba7b29af98f647b119640e79e581996cfdc0 100644 (file)
 #include <linux/highmem.h>
 #include <linux/sched.h>
 #include <linux/slab.h>
+#include <linux/hash.h>
+#include <linux/pmem.h>
 #include <linux/sort.h>
 #include <linux/io.h>
 #include <linux/nd.h>
 #include "nd-core.h"
 #include "nd.h"
 
+/*
+ * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
+ * irrelevant.
+ */
+#include <linux/io-64-nonatomic-hi-lo.h>
+
 static DEFINE_IDA(region_ida);
+static DEFINE_PER_CPU(int, flush_idx);
 
 static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
                struct nd_region_data *ndrd)
@@ -60,7 +69,7 @@ static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
 
 int nd_region_activate(struct nd_region *nd_region)
 {
-       int i;
+       int i, num_flush = 0;
        struct nd_region_data *ndrd;
        struct device *dev = &nd_region->dev;
        size_t flush_data_size = sizeof(void *);
@@ -72,6 +81,7 @@ int nd_region_activate(struct nd_region *nd_region)
 
                /* at least one null hint slot per-dimm for the "no-hint" case */
                flush_data_size += sizeof(void *);
+               num_flush = min_not_zero(num_flush, nvdimm->num_flush);
                if (!nvdimm->num_flush)
                        continue;
                flush_data_size += nvdimm->num_flush * sizeof(void *);
@@ -83,6 +93,7 @@ int nd_region_activate(struct nd_region *nd_region)
                return -ENOMEM;
        dev_set_drvdata(dev, ndrd);
 
+       ndrd->flush_mask = (1 << ilog2(num_flush)) - 1;
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm *nvdimm = nd_mapping->nvdimm;
@@ -864,6 +875,67 @@ struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
 }
 EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);
 
+/**
+ * nvdimm_flush - flush any posted write queues between the cpu and pmem media
+ * @nd_region: blk or interleaved pmem region
+ */
+void nvdimm_flush(struct nd_region *nd_region)
+{
+       struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+       int i, idx;
+
+       /*
+        * Try to encourage some diversity in flush hint addresses
+        * across cpus assuming a limited number of flush hints.
+        */
+       idx = this_cpu_read(flush_idx);
+       idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));
+
+       /*
+        * The first wmb() is needed to 'sfence' all previous writes
+        * such that they are architecturally visible for the platform
+        * buffer flush.  Note that we've already arranged for pmem
+        * writes to avoid the cache via arch_memcpy_to_pmem().  The
+        * final wmb() ensures ordering for the NVDIMM flush write.
+        */
+       wmb();
+       for (i = 0; i < nd_region->ndr_mappings; i++)
+               if (ndrd->flush_wpq[i][0])
+                       writeq(1, ndrd->flush_wpq[i][idx & ndrd->flush_mask]);
+       wmb();
+}
+EXPORT_SYMBOL_GPL(nvdimm_flush);
+
+/**
+ * nvdimm_has_flush - determine write flushing requirements
+ * @nd_region: blk or interleaved pmem region
+ *
+ * Returns 1 if writes require flushing
+ * Returns 0 if writes do not require flushing
+ * Returns -ENXIO if flushing capability can not be determined
+ */
+int nvdimm_has_flush(struct nd_region *nd_region)
+{
+       struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
+       int i;
+
+       /* no nvdimm == flushing capability unknown */
+       if (nd_region->ndr_mappings == 0)
+               return -ENXIO;
+
+       for (i = 0; i < nd_region->ndr_mappings; i++)
+               /* flush hints present, flushing required */
+               if (ndrd->flush_wpq[i][0])
+                       return 1;
+
+       /*
+        * The platform defines dimm devices without hints, assume
+        * platform persistence mechanism like ADR
+        */
+       return 0;
+}
+EXPORT_SYMBOL_GPL(nvdimm_has_flush);
+
 void __exit nd_region_devs_exit(void)
 {
        ida_destroy(&region_ida);
This page took 0.027422 seconds and 5 git commands to generate.