2 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
4 * This program is free software; you can redistribute it and/or modify
5 * it under the terms of version 2 of the GNU General Public License as
6 * published by the Free Software Foundation.
8 * This program is distributed in the hope that it will be useful, but
9 * WITHOUT ANY WARRANTY; without even the implied warranty of
10 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
11 * General Public License for more details.
13 #include <linux/module.h>
14 #include <linux/device.h>
15 #include <linux/slab.h>
16 #include <linux/pmem.h>
17 #include <linux/list.h>
22 static void namespace_io_release(struct device *dev)
24 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
29 static void namespace_pmem_release(struct device *dev)
31 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
33 kfree(nspm->alt_name);
38 static void namespace_blk_release(struct device *dev)
40 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
41 struct nd_region *nd_region = to_nd_region(dev->parent);
44 ida_simple_remove(&nd_region->ns_ida, nsblk->id);
45 kfree(nsblk->alt_name);
51 static struct device_type namespace_io_device_type = {
52 .name = "nd_namespace_io",
53 .release = namespace_io_release,
56 static struct device_type namespace_pmem_device_type = {
57 .name = "nd_namespace_pmem",
58 .release = namespace_pmem_release,
61 static struct device_type namespace_blk_device_type = {
62 .name = "nd_namespace_blk",
63 .release = namespace_blk_release,
66 static bool is_namespace_pmem(struct device *dev)
68 return dev ? dev->type == &namespace_pmem_device_type : false;
71 static bool is_namespace_blk(struct device *dev)
73 return dev ? dev->type == &namespace_blk_device_type : false;
76 static bool is_namespace_io(struct device *dev)
78 return dev ? dev->type == &namespace_io_device_type : false;
81 static int is_uuid_busy(struct device *dev, void *data)
83 u8 *uuid1 = data, *uuid2 = NULL;
85 if (is_namespace_pmem(dev)) {
86 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
89 } else if (is_namespace_blk(dev)) {
90 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
93 } else if (is_nd_btt(dev)) {
94 struct nd_btt *nd_btt = to_nd_btt(dev);
97 } else if (is_nd_pfn(dev)) {
98 struct nd_pfn *nd_pfn = to_nd_pfn(dev);
100 uuid2 = nd_pfn->uuid;
103 if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
109 static int is_namespace_uuid_busy(struct device *dev, void *data)
111 if (is_nd_pmem(dev) || is_nd_blk(dev))
112 return device_for_each_child(dev, data, is_uuid_busy);
117 * nd_is_uuid_unique - verify that no other namespace has @uuid
118 * @dev: any device on a nvdimm_bus
119 * @uuid: uuid to check
121 bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
123 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
127 WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
128 if (device_for_each_child(&nvdimm_bus->dev, uuid,
129 is_namespace_uuid_busy) != 0)
134 bool pmem_should_map_pages(struct device *dev)
136 struct nd_region *nd_region = to_nd_region(dev->parent);
137 struct nd_namespace_io *nsio;
139 if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
142 if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
145 if (is_nd_pfn(dev) || is_nd_btt(dev))
148 nsio = to_nd_namespace_io(dev);
149 if (region_intersects(nsio->res.start, resource_size(&nsio->res),
150 IORESOURCE_SYSTEM_RAM,
151 IORES_DESC_NONE) == REGION_MIXED)
154 #ifdef ARCH_MEMREMAP_PMEM
155 return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
160 EXPORT_SYMBOL(pmem_should_map_pages);
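/*
 * Illustrative sketch, not part of this file: a hypothetical pmem front end
 * might use the helper above to decide between a page-backed mapping and a
 * plain memremap()-style mapping. The branch bodies are placeholders.
 */
static void __maybe_unused pmem_map_example(struct device *dev)
{
        if (pmem_should_map_pages(dev)) {
                /* map with struct pages (e.g. devm_memremap_pages()) so the
                 * namespace can back DAX and direct-I/O */
        } else {
                /* fall back to a linear memremap() of the resource range */
        }
}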
162 const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
165 struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
166 const char *suffix = NULL;
168 if (ndns->claim && is_nd_btt(ndns->claim))
171 if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
172 sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
173 } else if (is_namespace_blk(&ndns->dev)) {
174 struct nd_namespace_blk *nsblk;
176 nsblk = to_nd_namespace_blk(&ndns->dev);
177 sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
178 suffix ? suffix : "");
185 EXPORT_SYMBOL(nvdimm_namespace_disk_name);
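/*
 * Illustrative usage, hypothetical caller: the block front ends pass in a
 * DISK_NAME_LEN-sized buffer and get back names like "pmem0", "pmem0s"
 * (btt-claimed), or "ndblk0.1".
 */
static void __maybe_unused disk_name_example(struct nd_namespace_common *ndns)
{
        char name[32]; /* assumed to match DISK_NAME_LEN */

        nvdimm_namespace_disk_name(ndns, name);
}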
187 const u8 *nd_dev_to_uuid(struct device *dev)
189 static const u8 null_uuid[16];
194 if (is_namespace_pmem(dev)) {
195 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
198 } else if (is_namespace_blk(dev)) {
199 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
205 EXPORT_SYMBOL(nd_dev_to_uuid);
207 static ssize_t nstype_show(struct device *dev,
208 struct device_attribute *attr, char *buf)
210 struct nd_region *nd_region = to_nd_region(dev->parent);
212 return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
214 static DEVICE_ATTR_RO(nstype);
216 static ssize_t __alt_name_store(struct device *dev, const char *buf,
219 char *input, *pos, *alt_name, **ns_altname;
222 if (is_namespace_pmem(dev)) {
223 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
225 ns_altname = &nspm->alt_name;
226 } else if (is_namespace_blk(dev)) {
227 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
229 ns_altname = &nsblk->alt_name;
233 if (dev->driver || to_ndns(dev)->claim)
236 input = kmemdup(buf, len + 1, GFP_KERNEL);
242 if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
247 alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
253 *ns_altname = alt_name;
254 sprintf(*ns_altname, "%s", pos);
262 static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
264 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
265 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
266 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
267 struct nd_label_id label_id;
268 resource_size_t size = 0;
269 struct resource *res;
273 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
274 for_each_dpa_resource(ndd, res)
275 if (strcmp(res->name, label_id.id) == 0)
276 size += resource_size(res);
280 static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
282 struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
283 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
284 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
285 struct nd_label_id label_id;
286 struct resource *res;
289 if (!nsblk->uuid || !nsblk->lbasize || !ndd)
293 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
294 for_each_dpa_resource(ndd, res) {
295 if (strcmp(res->name, label_id.id) != 0)
298 * Resources with unacknowledged adjustments indicate a
299 * failure to update labels
301 if (res->flags & DPA_RESOURCE_ADJUSTED)
306 /* These values match after a successful label update */
307 if (count != nsblk->num_resources)
310 for (i = 0; i < nsblk->num_resources; i++) {
311 struct resource *found = NULL;
313 for_each_dpa_resource(ndd, res)
314 if (res == nsblk->res[i]) {
326 resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
328 resource_size_t size;
330 nvdimm_bus_lock(&nsblk->common.dev);
331 size = __nd_namespace_blk_validate(nsblk);
332 nvdimm_bus_unlock(&nsblk->common.dev);
336 EXPORT_SYMBOL(nd_namespace_blk_validate);
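/*
 * Illustrative sketch, hypothetical helper: a namespace's DPA extents are
 * keyed by the id string nd_label_gen_id() derives from the uuid and flags
 * ("blk-<uuid>" for local/BLK, "pmem-<uuid>" otherwise), so ownership of a
 * dpa resource reduces to the same name comparison used in the validation
 * loops above.
 */
static bool __maybe_unused nsblk_owns_resource(struct nd_namespace_blk *nsblk,
                struct resource *res)
{
        struct nd_label_id label_id;

        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        return strcmp(res->name, label_id.id) == 0;
}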
339 static int nd_namespace_label_update(struct nd_region *nd_region,
342 dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
343 "namespace must be idle during label update\n");
344 if (dev->driver || to_ndns(dev)->claim)
348 * Only allow label writes that will result in a valid namespace
349 * or deletion of an existing namespace.
351 if (is_namespace_pmem(dev)) {
352 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
353 resource_size_t size = resource_size(&nspm->nsio.res);
355 if (size == 0 && nspm->uuid)
356 /* delete allocation */;
357 else if (!nspm->uuid)
360 return nd_pmem_namespace_label_update(nd_region, nspm, size);
361 } else if (is_namespace_blk(dev)) {
362 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
363 resource_size_t size = nd_namespace_blk_size(nsblk);
365 if (size == 0 && nsblk->uuid)
366 /* delete allocation */;
367 else if (!nsblk->uuid || !nsblk->lbasize)
370 return nd_blk_namespace_label_update(nd_region, nsblk, size);
375 static ssize_t alt_name_store(struct device *dev,
376 struct device_attribute *attr, const char *buf, size_t len)
378 struct nd_region *nd_region = to_nd_region(dev->parent);
382 nvdimm_bus_lock(dev);
383 wait_nvdimm_bus_probe_idle(dev);
384 rc = __alt_name_store(dev, buf, len);
386 rc = nd_namespace_label_update(nd_region, dev);
387 dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
388 nvdimm_bus_unlock(dev);
391 return rc < 0 ? rc : len;
394 static ssize_t alt_name_show(struct device *dev,
395 struct device_attribute *attr, char *buf)
399 if (is_namespace_pmem(dev)) {
400 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
402 ns_altname = nspm->alt_name;
403 } else if (is_namespace_blk(dev)) {
404 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
406 ns_altname = nsblk->alt_name;
410 return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
412 static DEVICE_ATTR_RW(alt_name);
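/*
 * Illustrative sketch, hypothetical helper: alt_name_store() above and the
 * size/uuid/sector_size stores below all follow the same shape -- take the
 * bus lock, wait for in-flight probing to settle, apply the change, then
 * push the result out to the namespace labels.
 */
static ssize_t __maybe_unused namespace_store_example(struct device *dev,
                const char *buf, size_t len,
                ssize_t (*apply)(struct device *dev, const char *buf, size_t len))
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = apply(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        nvdimm_bus_unlock(dev);

        return rc < 0 ? rc : len;
}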
414 static int scan_free(struct nd_region *nd_region,
415 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
418 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
419 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
423 struct resource *res, *last;
424 resource_size_t new_start;
427 for_each_dpa_resource(ndd, res)
428 if (strcmp(res->name, label_id->id) == 0)
434 if (n >= resource_size(res)) {
435 n -= resource_size(res);
436 nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
437 nvdimm_free_dpa(ndd, res);
438 /* retry with last resource deleted */
443 * Keep BLK allocations relegated to high DPA as much as possible
447 new_start = res->start + n;
449 new_start = res->start;
451 rc = adjust_resource(res, new_start, resource_size(res) - n);
453 res->flags |= DPA_RESOURCE_ADJUSTED;
454 nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
462 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
463 * @nd_region: the set of dimms to reclaim @n bytes from
464 * @label_id: unique identifier for the namespace consuming this dpa range
465 * @n: number of bytes per-dimm to release
467 * Assumes resources are ordered. Starting from the end try to
468 * adjust_resource() the allocation to @n, but if @n is larger than the
469 allocation delete it and find the 'new' last allocation in the label set.
472 static int shrink_dpa_allocation(struct nd_region *nd_region,
473 struct nd_label_id *label_id, resource_size_t n)
477 for (i = 0; i < nd_region->ndr_mappings; i++) {
478 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
481 rc = scan_free(nd_region, nd_mapping, label_id, n);
489 static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
490 struct nd_region *nd_region, struct nd_mapping *nd_mapping,
493 bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
494 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
495 resource_size_t first_dpa;
496 struct resource *res;
499 /* allocate blk from highest dpa first */
501 first_dpa = nd_mapping->start + nd_mapping->size - n;
503 first_dpa = nd_mapping->start;
505 /* first resource allocation for this label-id or dimm */
506 res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
510 nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
514 static bool space_valid(bool is_pmem, bool is_reserve,
515 struct nd_label_id *label_id, struct resource *res)
518 * For BLK-space any space is valid, for PMEM-space, it must be
519 * contiguous with an existing allocation unless we are reserving pmem.
522 if (is_reserve || !is_pmem)
524 if (!res || strcmp(res->name, label_id->id) == 0)
530 ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
533 static resource_size_t scan_allocate(struct nd_region *nd_region,
534 struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
537 resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
538 bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
539 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
540 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
541 const resource_size_t to_allocate = n;
542 struct resource *res;
547 for_each_dpa_resource(ndd, res) {
548 resource_size_t allocate, available = 0, free_start, free_end;
549 struct resource *next = res->sibling, *new_res = NULL;
550 enum alloc_loc loc = ALLOC_ERR;
554 /* ignore resources outside this nd_mapping */
555 if (res->start > mapping_end)
557 if (res->end < nd_mapping->start)
560 /* space at the beginning of the mapping */
561 if (!first++ && res->start > nd_mapping->start) {
562 free_start = nd_mapping->start;
563 available = res->start - free_start;
564 if (space_valid(is_pmem, is_reserve, label_id, NULL))
568 /* space between allocations */
570 free_start = res->start + resource_size(res);
571 free_end = min(mapping_end, next->start - 1);
572 if (space_valid(is_pmem, is_reserve, label_id, res)
573 && free_start < free_end) {
574 available = free_end + 1 - free_start;
579 /* space at the end of the mapping */
581 free_start = res->start + resource_size(res);
582 free_end = mapping_end;
583 if (space_valid(is_pmem, is_reserve, label_id, res)
584 && free_start < free_end) {
585 available = free_end + 1 - free_start;
590 if (!loc || !available)
592 allocate = min(available, n);
595 if (strcmp(res->name, label_id->id) == 0) {
596 /* adjust current resource up */
597 if (is_pmem && !is_reserve)
599 rc = adjust_resource(res, res->start - allocate,
600 resource_size(res) + allocate);
601 action = "cur grow up";
606 if (strcmp(next->name, label_id->id) == 0) {
607 /* adjust next resource up */
608 if (is_pmem && !is_reserve)
610 rc = adjust_resource(next, next->start
611 - allocate, resource_size(next)
614 action = "next grow up";
615 } else if (strcmp(res->name, label_id->id) == 0) {
616 action = "grow down";
621 if (strcmp(res->name, label_id->id) == 0)
622 action = "grow down";
630 if (strcmp(action, "allocate") == 0) {
631 /* BLK allocate bottom up */
633 free_start += available - allocate;
634 else if (!is_reserve && free_start != nd_mapping->start)
637 new_res = nvdimm_allocate_dpa(ndd, label_id,
638 free_start, allocate);
641 } else if (strcmp(action, "grow down") == 0) {
642 /* adjust current resource down */
643 rc = adjust_resource(res, res->start, resource_size(res)
646 res->flags |= DPA_RESOURCE_ADJUSTED;
652 nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
661 * Retry scan with newly inserted resources.
662 * For example, if we did an ALLOC_BEFORE
663 * insertion there may also have been space
664 * available for an ALLOC_AFTER insertion, so we
665 * need to check this same resource again
673 * If we allocated nothing in the BLK case it may be because we are in
674 * an initial "pmem-reserve pass". Only do an initial BLK allocation
675 * when none of the DPA space is reserved.
677 if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
678 return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
682 static int merge_dpa(struct nd_region *nd_region,
683 struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
685 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
686 struct resource *res;
688 if (strncmp("pmem", label_id->id, 4) == 0)
691 for_each_dpa_resource(ndd, res) {
693 struct resource *next = res->sibling;
694 resource_size_t end = res->start + resource_size(res);
696 if (!next || strcmp(res->name, label_id->id) != 0
697 || strcmp(next->name, label_id->id) != 0
698 || end != next->start)
700 end += resource_size(next);
701 nvdimm_free_dpa(ndd, next);
702 rc = adjust_resource(res, res->start, end - res->start);
703 nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
706 res->flags |= DPA_RESOURCE_ADJUSTED;
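/*
 * Illustrative check, hypothetical helper: once merge_dpa() has collapsed
 * adjacent extents, a PMEM label_id is expected to be covered by a single
 * contiguous dpa resource per dimm (the same count that dpa_extents_show()
 * reports further below).
 */
static bool __maybe_unused label_id_is_single_extent(struct nvdimm_drvdata *ndd,
                struct nd_label_id *label_id)
{
        struct resource *res;
        int count = 0;

        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id->id) == 0)
                        count++;
        return count == 1;
}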
713 static int __reserve_free_pmem(struct device *dev, void *data)
715 struct nvdimm *nvdimm = data;
716 struct nd_region *nd_region;
717 struct nd_label_id label_id;
720 if (!is_nd_pmem(dev))
723 nd_region = to_nd_region(dev);
724 if (nd_region->ndr_mappings == 0)
727 memset(&label_id, 0, sizeof(label_id));
728 strcat(label_id.id, "pmem-reserve");
729 for (i = 0; i < nd_region->ndr_mappings; i++) {
730 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
731 resource_size_t n, rem = 0;
733 if (nd_mapping->nvdimm != nvdimm)
736 n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
739 rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
740 dev_WARN_ONCE(&nd_region->dev, rem,
741 "pmem reserve underrun: %#llx of %#llx bytes\n",
742 (unsigned long long) n - rem,
743 (unsigned long long) n);
744 return rem ? -ENXIO : 0;
750 static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
751 struct nd_mapping *nd_mapping)
753 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
754 struct resource *res, *_res;
756 for_each_dpa_resource_safe(ndd, res, _res)
757 if (strcmp(res->name, "pmem-reserve") == 0)
758 nvdimm_free_dpa(ndd, res);
761 static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
762 struct nd_mapping *nd_mapping)
764 struct nvdimm *nvdimm = nd_mapping->nvdimm;
767 rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
768 __reserve_free_pmem);
770 release_free_pmem(nvdimm_bus, nd_mapping);
775 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
776 * @nd_region: the set of dimms to allocate @n more bytes from
777 * @label_id: unique identifier for the namespace consuming this dpa range
778 * @n: number of bytes per-dimm to add to the existing allocation
780 * Assumes resources are ordered. For BLK regions, first consume
781 * BLK-only available DPA free space, then consume PMEM-aliased DPA
782 * space starting at the highest DPA. For PMEM regions start
783 * allocations from the start of an interleave set and end at the first
784 * BLK allocation or the end of the interleave set, whichever comes first.
787 static int grow_dpa_allocation(struct nd_region *nd_region,
788 struct nd_label_id *label_id, resource_size_t n)
790 struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
791 bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
794 for (i = 0; i < nd_region->ndr_mappings; i++) {
795 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
796 resource_size_t rem = n;
800 * In the BLK case try once with all unallocated PMEM
801 * reserved, and once without
803 for (j = is_pmem; j < 2; j++) {
804 bool blk_only = j == 0;
807 rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
811 rem = scan_allocate(nd_region, nd_mapping,
814 release_free_pmem(nvdimm_bus, nd_mapping);
816 /* try again and allow encroachments into PMEM */
821 dev_WARN_ONCE(&nd_region->dev, rem,
822 "allocation underrun: %#llx of %#llx bytes\n",
823 (unsigned long long) n - rem,
824 (unsigned long long) n);
828 rc = merge_dpa(nd_region, nd_mapping, label_id);
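/*
 * Illustrative caller, hypothetical helper mirroring __size_store() below:
 * a requested namespace capacity is divided evenly across the region's
 * mappings before each dimm's allocation is grown.
 */
static int __maybe_unused grow_namespace_example(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t total)
{
        resource_size_t per_dimm = div_u64(total, nd_region->ndr_mappings);

        return grow_dpa_allocation(nd_region, label_id, per_dimm);
}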
836 static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
837 struct nd_namespace_pmem *nspm, resource_size_t size)
839 struct resource *res = &nspm->nsio.res;
841 res->start = nd_region->ndr_start;
842 res->end = nd_region->ndr_start + size - 1;
845 static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
848 dev_dbg(dev, "%s: uuid not set\n", where);
854 static ssize_t __size_store(struct device *dev, unsigned long long val)
856 resource_size_t allocated = 0, available = 0;
857 struct nd_region *nd_region = to_nd_region(dev->parent);
858 struct nd_mapping *nd_mapping;
859 struct nvdimm_drvdata *ndd;
860 struct nd_label_id label_id;
861 u32 flags = 0, remainder;
865 if (dev->driver || to_ndns(dev)->claim)
868 if (is_namespace_pmem(dev)) {
869 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
872 } else if (is_namespace_blk(dev)) {
873 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
876 flags = NSLABEL_FLAG_LOCAL;
880 * We need a uuid for the allocation-label and dimm(s) on which
881 * to store the label.
883 if (uuid_not_set(uuid, dev, __func__))
885 if (nd_region->ndr_mappings == 0) {
886 dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
890 div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
892 dev_dbg(dev, "%llu is not %dK aligned\n", val,
893 (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
897 nd_label_gen_id(&label_id, uuid, flags);
898 for (i = 0; i < nd_region->ndr_mappings; i++) {
899 nd_mapping = &nd_region->mapping[i];
900 ndd = to_ndd(nd_mapping);
903 * All dimms in an interleave set, or the base dimm for a blk
904 * region, need to be enabled for the size to be changed.
909 allocated += nvdimm_allocated_dpa(ndd, &label_id);
911 available = nd_region_available_dpa(nd_region);
913 if (val > available + allocated)
916 if (val == allocated)
919 val = div_u64(val, nd_region->ndr_mappings);
920 allocated = div_u64(allocated, nd_region->ndr_mappings);
922 rc = shrink_dpa_allocation(nd_region, &label_id,
925 rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);
930 if (is_namespace_pmem(dev)) {
931 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
933 nd_namespace_pmem_set_size(nd_region, nspm,
934 val * nd_region->ndr_mappings);
935 } else if (is_namespace_blk(dev)) {
936 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
939 * Try to delete the namespace if we deleted all of its
940 * allocation, this is not the seed device for the
941 * region, and it is not actively claimed by a btt instance.
944 if (val == 0 && nd_region->ns_seed != dev
945 && !nsblk->common.claim)
946 nd_device_unregister(dev, ND_ASYNC);
952 static ssize_t size_store(struct device *dev,
953 struct device_attribute *attr, const char *buf, size_t len)
955 struct nd_region *nd_region = to_nd_region(dev->parent);
956 unsigned long long val;
960 rc = kstrtoull(buf, 0, &val);
965 nvdimm_bus_lock(dev);
966 wait_nvdimm_bus_probe_idle(dev);
967 rc = __size_store(dev, val);
969 rc = nd_namespace_label_update(nd_region, dev);
971 if (is_namespace_pmem(dev)) {
972 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
975 } else if (is_namespace_blk(dev)) {
976 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
981 if (rc == 0 && val == 0 && uuid) {
982 /* setting size zero == 'delete namespace' */
987 dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
988 ? "fail" : "success", rc);
990 nvdimm_bus_unlock(dev);
993 return rc < 0 ? rc : len;
996 resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
998 struct device *dev = &ndns->dev;
1000 if (is_namespace_pmem(dev)) {
1001 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1003 return resource_size(&nspm->nsio.res);
1004 } else if (is_namespace_blk(dev)) {
1005 return nd_namespace_blk_size(to_nd_namespace_blk(dev));
1006 } else if (is_namespace_io(dev)) {
1007 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
1009 return resource_size(&nsio->res);
1011 WARN_ONCE(1, "unknown namespace type\n");
1015 resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
1017 resource_size_t size;
1019 nvdimm_bus_lock(&ndns->dev);
1020 size = __nvdimm_namespace_capacity(ndns);
1021 nvdimm_bus_unlock(&ndns->dev);
1025 EXPORT_SYMBOL(nvdimm_namespace_capacity);
1027 static ssize_t size_show(struct device *dev,
1028 struct device_attribute *attr, char *buf)
1030 return sprintf(buf, "%llu\n", (unsigned long long)
1031 nvdimm_namespace_capacity(to_ndns(dev)));
1033 static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);
1035 static ssize_t uuid_show(struct device *dev,
1036 struct device_attribute *attr, char *buf)
1040 if (is_namespace_pmem(dev)) {
1041 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1044 } else if (is_namespace_blk(dev)) {
1045 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1052 return sprintf(buf, "%pUb\n", uuid);
1053 return sprintf(buf, "\n");
1057 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
1058 * @nd_region: parent region so we can update all dimms in the set
1059 * @dev: namespace type for generating label_id
1060 * @new_uuid: incoming uuid
1061 * @old_uuid: reference to the uuid storage location in the namespace object
1063 static int namespace_update_uuid(struct nd_region *nd_region,
1064 struct device *dev, u8 *new_uuid, u8 **old_uuid)
1066 u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
1067 struct nd_label_id old_label_id;
1068 struct nd_label_id new_label_id;
1071 if (!nd_is_uuid_unique(dev, new_uuid))
1074 if (*old_uuid == NULL)
1078 * If we've already written a label with this uuid, then it's
1079 * too late to rename because we can't reliably update the uuid
1080 * without losing the old namespace. Userspace must delete this
1081 * namespace to abandon the old uuid.
1083 for (i = 0; i < nd_region->ndr_mappings; i++) {
1084 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1087 * This check by itself is sufficient because old_uuid
1088 * would be NULL above if this uuid did not exist in the
1089 * currently written set.
1091 * FIXME: can we delete uuid with zero dpa allocated?
1093 if (list_empty(&nd_mapping->labels))
1097 nd_label_gen_id(&old_label_id, *old_uuid, flags);
1098 nd_label_gen_id(&new_label_id, new_uuid, flags);
1099 for (i = 0; i < nd_region->ndr_mappings; i++) {
1100 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1101 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1102 struct resource *res;
1104 for_each_dpa_resource(ndd, res)
1105 if (strcmp(res->name, old_label_id.id) == 0)
1106 sprintf((void *) res->name, "%s",
1111 *old_uuid = new_uuid;
1115 static ssize_t uuid_store(struct device *dev,
1116 struct device_attribute *attr, const char *buf, size_t len)
1118 struct nd_region *nd_region = to_nd_region(dev->parent);
1123 if (is_namespace_pmem(dev)) {
1124 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1126 ns_uuid = &nspm->uuid;
1127 } else if (is_namespace_blk(dev)) {
1128 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1130 ns_uuid = &nsblk->uuid;
1135 nvdimm_bus_lock(dev);
1136 wait_nvdimm_bus_probe_idle(dev);
1137 if (to_ndns(dev)->claim)
1140 rc = nd_uuid_store(dev, &uuid, buf, len);
1142 rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
1144 rc = nd_namespace_label_update(nd_region, dev);
1147 dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
1148 rc, buf, buf[len - 1] == '\n' ? "" : "\n");
1149 nvdimm_bus_unlock(dev);
1152 return rc < 0 ? rc : len;
1154 static DEVICE_ATTR_RW(uuid);
1156 static ssize_t resource_show(struct device *dev,
1157 struct device_attribute *attr, char *buf)
1159 struct resource *res;
1161 if (is_namespace_pmem(dev)) {
1162 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1164 res = &nspm->nsio.res;
1165 } else if (is_namespace_io(dev)) {
1166 struct nd_namespace_io *nsio = to_nd_namespace_io(dev);
1172 /* no address to convey if the namespace has no allocation */
1173 if (resource_size(res) == 0)
1175 return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
1177 static DEVICE_ATTR_RO(resource);
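/*
 * Sector sizes selectable for a BLK namespace: the plain 512- and 4096-byte
 * formats plus variants that carry per-sector metadata (e.g. 520 = 512 + 8
 * bytes), terminated by 0.
 */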
1179 static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
1180 4096, 4104, 4160, 4224, 0 };
1182 static ssize_t sector_size_show(struct device *dev,
1183 struct device_attribute *attr, char *buf)
1185 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1187 if (!is_namespace_blk(dev))
1190 return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
1193 static ssize_t sector_size_store(struct device *dev,
1194 struct device_attribute *attr, const char *buf, size_t len)
1196 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1197 struct nd_region *nd_region = to_nd_region(dev->parent);
1200 if (!is_namespace_blk(dev))
1204 nvdimm_bus_lock(dev);
1205 if (to_ndns(dev)->claim)
1208 rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
1209 ns_lbasize_supported);
1211 rc = nd_namespace_label_update(nd_region, dev);
1212 dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
1213 rc, rc < 0 ? "tried" : "wrote", buf,
1214 buf[len - 1] == '\n' ? "" : "\n");
1215 nvdimm_bus_unlock(dev);
1218 return rc ? rc : len;
1220 static DEVICE_ATTR_RW(sector_size);
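/*
 * dpa_extents: the number of discontiguous dpa ranges currently assigned to
 * this namespace's label_id, summed over every mapping in the region.
 */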
1222 static ssize_t dpa_extents_show(struct device *dev,
1223 struct device_attribute *attr, char *buf)
1225 struct nd_region *nd_region = to_nd_region(dev->parent);
1226 struct nd_label_id label_id;
1231 nvdimm_bus_lock(dev);
1232 if (is_namespace_pmem(dev)) {
1233 struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
1237 } else if (is_namespace_blk(dev)) {
1238 struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
1241 flags = NSLABEL_FLAG_LOCAL;
1247 nd_label_gen_id(&label_id, uuid, flags);
1248 for (i = 0; i < nd_region->ndr_mappings; i++) {
1249 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1250 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1251 struct resource *res;
1253 for_each_dpa_resource(ndd, res)
1254 if (strcmp(res->name, label_id.id) == 0)
1258 nvdimm_bus_unlock(dev);
1260 return sprintf(buf, "%d\n", count);
1262 static DEVICE_ATTR_RO(dpa_extents);
1264 static ssize_t holder_show(struct device *dev,
1265 struct device_attribute *attr, char *buf)
1267 struct nd_namespace_common *ndns = to_ndns(dev);
1271 rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
1276 static DEVICE_ATTR_RO(holder);
1278 static ssize_t mode_show(struct device *dev,
1279 struct device_attribute *attr, char *buf)
1281 struct nd_namespace_common *ndns = to_ndns(dev);
1282 struct device *claim;
1287 claim = ndns->claim;
1288 if (claim && is_nd_btt(claim))
1290 else if (claim && is_nd_pfn(claim))
1292 else if (claim && is_nd_dax(claim))
1294 else if (!claim && pmem_should_map_pages(dev))
1298 rc = sprintf(buf, "%s\n", mode);
1303 static DEVICE_ATTR_RO(mode);
1305 static ssize_t force_raw_store(struct device *dev,
1306 struct device_attribute *attr, const char *buf, size_t len)
1309 int rc = strtobool(buf, &force_raw);
1314 to_ndns(dev)->force_raw = force_raw;
1318 static ssize_t force_raw_show(struct device *dev,
1319 struct device_attribute *attr, char *buf)
1321 return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
1323 static DEVICE_ATTR_RW(force_raw);
1325 static struct attribute *nd_namespace_attributes[] = {
1326 &dev_attr_nstype.attr,
1327 &dev_attr_size.attr,
1328 &dev_attr_mode.attr,
1329 &dev_attr_uuid.attr,
1330 &dev_attr_holder.attr,
1331 &dev_attr_resource.attr,
1332 &dev_attr_alt_name.attr,
1333 &dev_attr_force_raw.attr,
1334 &dev_attr_sector_size.attr,
1335 &dev_attr_dpa_extents.attr,
1339 static umode_t namespace_visible(struct kobject *kobj,
1340 struct attribute *a, int n)
1342 struct device *dev = container_of(kobj, struct device, kobj);
1344 if (a == &dev_attr_resource.attr) {
1345 if (is_namespace_blk(dev))
1350 if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
1351 if (a == &dev_attr_size.attr)
1352 return S_IWUSR | S_IRUGO;
1354 if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
1360 if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
1361 || a == &dev_attr_holder.attr
1362 || a == &dev_attr_force_raw.attr
1363 || a == &dev_attr_mode.attr)
1369 static struct attribute_group nd_namespace_attribute_group = {
1370 .attrs = nd_namespace_attributes,
1371 .is_visible = namespace_visible,
1374 static const struct attribute_group *nd_namespace_attribute_groups[] = {
1375 &nd_device_attribute_group,
1376 &nd_namespace_attribute_group,
1377 &nd_numa_attribute_group,
1381 struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
1383 struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
1384 struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
1385 struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
1386 struct nd_namespace_common *ndns = NULL;
1387 resource_size_t size;
1389 if (nd_btt || nd_pfn || nd_dax) {
1391 ndns = nd_btt->ndns;
1393 ndns = nd_pfn->ndns;
1395 ndns = nd_dax->nd_pfn.ndns;
1398 return ERR_PTR(-ENODEV);
1401 * Flush any in-progress probes / removals in the driver
1402 * for the raw personality of this namespace.
1404 device_lock(&ndns->dev);
1405 device_unlock(&ndns->dev);
1406 if (ndns->dev.driver) {
1407 dev_dbg(&ndns->dev, "is active, can't bind %s\n",
1409 return ERR_PTR(-EBUSY);
1411 if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
1412 "host (%s) vs claim (%s) mismatch\n",
1414 dev_name(ndns->claim)))
1415 return ERR_PTR(-ENXIO);
1417 ndns = to_ndns(dev);
1419 dev_dbg(dev, "claimed by %s, failing probe\n",
1420 dev_name(ndns->claim));
1422 return ERR_PTR(-ENXIO);
1426 size = nvdimm_namespace_capacity(ndns);
1427 if (size < ND_MIN_NAMESPACE_SIZE) {
1428 dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
1429 &size, ND_MIN_NAMESPACE_SIZE);
1430 return ERR_PTR(-ENODEV);
1433 if (is_namespace_pmem(&ndns->dev)) {
1434 struct nd_namespace_pmem *nspm;
1436 nspm = to_nd_namespace_pmem(&ndns->dev);
1437 if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
1438 return ERR_PTR(-ENODEV);
1439 } else if (is_namespace_blk(&ndns->dev)) {
1440 struct nd_namespace_blk *nsblk;
1442 nsblk = to_nd_namespace_blk(&ndns->dev);
1443 if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
1444 return ERR_PTR(-ENODEV);
1445 if (!nsblk->lbasize) {
1446 dev_dbg(&ndns->dev, "%s: sector size not set\n",
1448 return ERR_PTR(-ENODEV);
1450 if (!nd_namespace_blk_validate(nsblk))
1451 return ERR_PTR(-ENODEV);
1456 EXPORT_SYMBOL(nvdimm_namespace_common_probe);
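/*
 * Illustrative caller, hypothetical and modeled on the btt/pfn/dax front
 * ends: a claiming driver resolves its backing namespace at probe time and
 * bails out if the claim cannot be established.
 */
static int __maybe_unused claim_probe_example(struct device *dev)
{
        struct nd_namespace_common *ndns;

        ndns = nvdimm_namespace_common_probe(dev);
        if (IS_ERR(ndns))
                return PTR_ERR(ndns);
        /* ... instantiate the claiming personality on top of @ndns ... */
        return 0;
}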
1458 static struct device **create_namespace_io(struct nd_region *nd_region)
1460 struct nd_namespace_io *nsio;
1461 struct device *dev, **devs;
1462 struct resource *res;
1464 nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
1468 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1474 dev = &nsio->common.dev;
1475 dev->type = &namespace_io_device_type;
1476 dev->parent = &nd_region->dev;
1478 res->name = dev_name(&nd_region->dev);
1479 res->flags = IORESOURCE_MEM;
1480 res->start = nd_region->ndr_start;
1481 res->end = res->start + nd_region->ndr_size - 1;
1487 static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1488 u64 cookie, u16 pos)
1490 struct nd_namespace_label *found = NULL;
1493 for (i = 0; i < nd_region->ndr_mappings; i++) {
1494 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1495 struct nd_label_ent *label_ent;
1496 bool found_uuid = false;
1498 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1499 struct nd_namespace_label *nd_label = label_ent->label;
1500 u16 position, nlabel;
1505 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1506 position = __le16_to_cpu(nd_label->position);
1507 nlabel = __le16_to_cpu(nd_label->nlabel);
1509 if (isetcookie != cookie)
1512 if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1516 dev_dbg(to_ndd(nd_mapping)->dev,
1517 "%s duplicate entry for uuid\n",
1522 if (nlabel != nd_region->ndr_mappings)
1524 if (position != pos)
1532 return found != NULL;
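/*
 * Illustrative sketch, hypothetical helper: an interleave set is "complete"
 * for a uuid only when has_uuid_at_pos() succeeds for every position from 0
 * to nd_region->ndr_mappings - 1, which is the check find_pmem_label_set()
 * performs below.
 */
static bool __maybe_unused interleave_set_complete(struct nd_region *nd_region,
                u8 *uuid, u64 cookie)
{
        u16 pos;

        for (pos = 0; pos < nd_region->ndr_mappings; pos++)
                if (!has_uuid_at_pos(nd_region, uuid, cookie, pos))
                        return false;
        return true;
}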
1535 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1542 for (i = 0; i < nd_region->ndr_mappings; i++) {
1543 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1544 struct nd_namespace_label *nd_label = NULL;
1545 u64 hw_start, hw_end, pmem_start, pmem_end;
1546 struct nd_label_ent *label_ent;
1548 mutex_lock(&nd_mapping->lock);
1549 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1550 nd_label = label_ent->label;
1553 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1557 mutex_unlock(&nd_mapping->lock);
1565 * Check that this label is compliant with the dpa
1566 * range published in NFIT
1568 hw_start = nd_mapping->start;
1569 hw_end = hw_start + nd_mapping->size;
1570 pmem_start = __le64_to_cpu(nd_label->dpa);
1571 pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
1572 if (pmem_start == hw_start && pmem_end <= hw_end)
1577 mutex_lock(&nd_mapping->lock);
1578 label_ent = list_first_entry(&nd_mapping->labels,
1579 typeof(*label_ent), list);
1580 label_ent->label = nd_label;
1581 list_del(&label_ent->list);
1582 nd_mapping_free_labels(nd_mapping);
1583 list_add(&label_ent->list, &nd_mapping->labels);
1584 mutex_unlock(&nd_mapping->lock);
1590 * find_pmem_label_set - validate interleave set labelling, retrieve label0
1591 * @nd_region: region with mappings to validate
1593 static int find_pmem_label_set(struct nd_region *nd_region,
1594 struct nd_namespace_pmem *nspm)
1596 u64 cookie = nd_region_interleave_set_cookie(nd_region);
1597 u8 select_id[NSLABEL_UUID_LEN];
1598 struct nd_label_ent *label_ent;
1599 struct nd_mapping *nd_mapping;
1600 resource_size_t size = 0;
1606 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1611 * Find a complete set of labels by uuid. By definition we can start
1612 * with any mapping as the reference label
1614 for (i = 0; i < nd_region->ndr_mappings; i++) {
1615 nd_mapping = &nd_region->mapping[i];
1616 mutex_lock_nested(&nd_mapping->lock, i);
1618 list_for_each_entry(label_ent, &nd_region->mapping[0].labels, list) {
1619 struct nd_namespace_label *nd_label = label_ent->label;
1623 if (__le64_to_cpu(nd_label->isetcookie) != cookie)
1626 for (i = 0; i < nd_region->ndr_mappings; i++)
1627 if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1630 if (i < nd_region->ndr_mappings) {
1632 * Give up if we don't find an instance of a
1633 * uuid at each position (from 0 to
1634 * nd_region->ndr_mappings - 1), or if we find a
1635 * dimm with two instances of the same uuid.
1639 } else if (pmem_id) {
1641 * If there is more than one valid uuid set, we
1642 * need userspace to clean this up.
1647 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1648 pmem_id = select_id;
1650 for (i = 0; i < nd_region->ndr_mappings; i++) {
1651 int reverse = nd_region->ndr_mappings - 1 - i;
1653 nd_mapping = &nd_region->mapping[reverse];
1654 mutex_unlock(&nd_mapping->lock);
1661 * Fix up each mapping's 'labels' to have the validated pmem label for
1662 * that position at labels[0], and NULL at labels[1]. In the process,
1663 * check that the namespace aligns with interleave-set. We know
1664 * that it does not overlap with any blk namespaces by virtue of
1665 * the dimm being enabled (i.e. nd_label_reserve_dpa() succeeded).
1668 rc = select_pmem_id(nd_region, pmem_id);
1672 /* Calculate total size and populate namespace properties from label0 */
1673 for (i = 0; i < nd_region->ndr_mappings; i++) {
1674 struct nd_namespace_label *label0;
1676 nd_mapping = &nd_region->mapping[i];
1677 mutex_lock(&nd_mapping->lock);
1678 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1679 typeof(*label_ent), list);
1680 label0 = label_ent ? label_ent->label : NULL;
1681 mutex_unlock(&nd_mapping->lock);
1688 size += __le64_to_cpu(label0->rawsize);
1689 if (__le16_to_cpu(label0->position) != 0)
1691 WARN_ON(nspm->alt_name || nspm->uuid);
1692 nspm->alt_name = kmemdup((void __force *) label0->name,
1693 NSLABEL_NAME_LEN, GFP_KERNEL);
1694 nspm->uuid = kmemdup((void __force *) label0->uuid,
1695 NSLABEL_UUID_LEN, GFP_KERNEL);
1698 if (!nspm->alt_name || !nspm->uuid) {
1703 nd_namespace_pmem_set_size(nd_region, nspm, size);
1709 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1712 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1715 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1722 static struct device **create_namespace_pmem(struct nd_region *nd_region)
1724 struct nd_namespace_pmem *nspm;
1725 struct device *dev, **devs;
1726 struct resource *res;
1729 nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1733 dev = &nspm->nsio.common.dev;
1734 dev->type = &namespace_pmem_device_type;
1735 dev->parent = &nd_region->dev;
1736 res = &nspm->nsio.res;
1737 res->name = dev_name(&nd_region->dev);
1738 res->flags = IORESOURCE_MEM;
1739 rc = find_pmem_label_set(nd_region, nspm);
1740 if (rc == -ENODEV) {
1743 /* Pass, try to permit namespace creation... */
1744 for (i = 0; i < nd_region->ndr_mappings; i++) {
1745 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1747 mutex_lock(&nd_mapping->lock);
1748 nd_mapping_free_labels(nd_mapping);
1749 mutex_unlock(&nd_mapping->lock);
1752 /* Publish a zero-sized namespace for userspace to configure. */
1753 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1759 devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1767 namespace_pmem_release(&nspm->nsio.common.dev);
1771 struct resource *nsblk_add_resource(struct nd_region *nd_region,
1772 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1773 resource_size_t start)
1775 struct nd_label_id label_id;
1776 struct resource *res;
1778 nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1779 res = krealloc(nsblk->res,
1780 sizeof(void *) * (nsblk->num_resources + 1),
1784 nsblk->res = (struct resource **) res;
1785 for_each_dpa_resource(ndd, res)
1786 if (strcmp(res->name, label_id.id) == 0
1787 && res->start == start) {
1788 nsblk->res[nsblk->num_resources++] = res;
1794 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1796 struct nd_namespace_blk *nsblk;
1799 if (!is_nd_blk(&nd_region->dev))
1802 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1806 dev = &nsblk->common.dev;
1807 dev->type = &namespace_blk_device_type;
1808 nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1809 if (nsblk->id < 0) {
1813 dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1814 dev->parent = &nd_region->dev;
1815 dev->groups = nd_namespace_attribute_groups;
1817 return &nsblk->common.dev;
1820 void nd_region_create_blk_seed(struct nd_region *nd_region)
1822 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1823 nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1825 * Seed creation failures are not fatal, provisioning is simply
1826 * disabled until memory becomes available
1828 if (!nd_region->ns_seed)
1829 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1831 nd_device_register(nd_region->ns_seed);
1834 void nd_region_create_dax_seed(struct nd_region *nd_region)
1836 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1837 nd_region->dax_seed = nd_dax_create(nd_region);
1839 * Seed creation failures are not fatal, provisioning is simply
1840 * disabled until memory becomes available
1842 if (!nd_region->dax_seed)
1843 dev_err(&nd_region->dev, "failed to create dax namespace\n");
1846 void nd_region_create_pfn_seed(struct nd_region *nd_region)
1848 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1849 nd_region->pfn_seed = nd_pfn_create(nd_region);
1851 * Seed creation failures are not fatal, provisioning is simply
1852 * disabled until memory becomes available
1854 if (!nd_region->pfn_seed)
1855 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1858 void nd_region_create_btt_seed(struct nd_region *nd_region)
1860 WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1861 nd_region->btt_seed = nd_btt_create(nd_region);
1863 * Seed creation failures are not fatal, provisioning is simply
1864 * disabled until memory becomes available
1866 if (!nd_region->btt_seed)
1867 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1870 static struct device **scan_labels(struct nd_region *nd_region,
1871 struct nd_mapping *nd_mapping)
1873 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1874 struct device *dev, **devs = NULL;
1875 struct nd_namespace_blk *nsblk;
1876 struct nd_label_ent *label_ent;
1879 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1880 struct nd_namespace_label *nd_label = label_ent->label;
1881 char name[NSLABEL_NAME_LEN];
1882 struct device **__devs;
1883 struct resource *res;
1888 flags = __le32_to_cpu(nd_label->flags);
1889 if (flags & NSLABEL_FLAG_LOCAL)
1894 for (i = 0; i < count; i++) {
1895 nsblk = to_nd_namespace_blk(devs[i]);
1896 if (memcmp(nsblk->uuid, nd_label->uuid,
1897 NSLABEL_UUID_LEN) == 0) {
1898 res = nsblk_add_resource(nd_region, ndd, nsblk,
1899 __le64_to_cpu(nd_label->dpa));
1902 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1903 dev_name(&nsblk->common.dev));
1909 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1912 memcpy(__devs, devs, sizeof(dev) * count);
1916 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1919 dev = &nsblk->common.dev;
1920 dev->type = &namespace_blk_device_type;
1921 dev->parent = &nd_region->dev;
1922 dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
1923 devs[count++] = dev;
1925 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1926 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1930 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1932 nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1934 res = nsblk_add_resource(nd_region, ndd, nsblk,
1935 __le64_to_cpu(nd_label->dpa));
1938 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1939 dev_name(&nsblk->common.dev));
1942 dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
1943 __func__, count, count == 1 ? "" : "s");
1946 /* Publish a zero-sized namespace for userspace to configure. */
1947 nd_mapping_free_labels(nd_mapping);
1949 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1952 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1955 dev = &nsblk->common.dev;
1956 dev->type = &namespace_blk_device_type;
1957 dev->parent = &nd_region->dev;
1958 devs[count++] = dev;
1964 for (i = 0; devs[i]; i++) {
1965 nsblk = to_nd_namespace_blk(devs[i]);
1966 namespace_blk_release(&nsblk->common.dev);
1972 static struct device **create_namespace_blk(struct nd_region *nd_region)
1974 struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1975 struct device **devs;
1977 if (nd_region->ndr_mappings == 0)
1980 mutex_lock(&nd_mapping->lock);
1981 devs = scan_labels(nd_region, nd_mapping);
1982 mutex_unlock(&nd_mapping->lock);
1987 static int init_active_labels(struct nd_region *nd_region)
1991 for (i = 0; i < nd_region->ndr_mappings; i++) {
1992 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1993 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1994 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1995 struct nd_label_ent *label_ent;
1999 * If the dimm is disabled then prevent the region from
2000 * being activated if it aliases DPA.
2003 if ((nvdimm->flags & NDD_ALIASING) == 0)
2005 dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
2006 dev_name(&nd_mapping->nvdimm->dev));
2009 nd_mapping->ndd = ndd;
2010 atomic_inc(&nvdimm->busy);
2013 count = nd_label_active_count(ndd);
2014 dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
2017 for (j = 0; j < count; j++) {
2018 struct nd_namespace_label *label;
2020 label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2023 label = nd_label_active(ndd, j);
2024 label_ent->label = label;
2026 mutex_lock(&nd_mapping->lock);
2027 list_add_tail(&label_ent->list, &nd_mapping->labels);
2028 mutex_unlock(&nd_mapping->lock);
2034 mutex_lock(&nd_mapping->lock);
2035 nd_mapping_free_labels(nd_mapping);
2036 mutex_unlock(&nd_mapping->lock);
2043 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2045 struct device **devs = NULL;
2046 int i, rc = 0, type;
2049 nvdimm_bus_lock(&nd_region->dev);
2050 rc = init_active_labels(nd_region);
2052 nvdimm_bus_unlock(&nd_region->dev);
2056 type = nd_region_to_nstype(nd_region);
2058 case ND_DEVICE_NAMESPACE_IO:
2059 devs = create_namespace_io(nd_region);
2061 case ND_DEVICE_NAMESPACE_PMEM:
2062 devs = create_namespace_pmem(nd_region);
2064 case ND_DEVICE_NAMESPACE_BLK:
2065 devs = create_namespace_blk(nd_region);
2070 nvdimm_bus_unlock(&nd_region->dev);
2075 for (i = 0; devs[i]; i++) {
2076 struct device *dev = devs[i];
2079 if (type == ND_DEVICE_NAMESPACE_BLK) {
2080 struct nd_namespace_blk *nsblk;
2082 nsblk = to_nd_namespace_blk(dev);
2083 id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2091 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2092 dev->groups = nd_namespace_attribute_groups;
2093 nd_device_register(dev);
2096 nd_region->ns_seed = devs[0];
2101 for (j = i; devs[j]; j++) {
2102 struct device *dev = devs[j];
2104 device_initialize(dev);
2109 * All of the namespaces we tried to register failed, so
2110 * fail region activation.