libnvdimm, label: convert label tracking to a linked list
cascardo/linux.git: drivers/nvdimm/namespace_devs.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        kfree(nspm->alt_name);
        kfree(nspm->uuid);
        kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nsblk->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nsblk->id);
        kfree(nsblk->alt_name);
        kfree(nsblk->uuid);
        kfree(nsblk->res);
        kfree(nsblk);
}

static struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
};

static bool is_namespace_pmem(struct device *dev)
{
        return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
        return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
        return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
        u8 *uuid1 = data, *uuid2 = NULL;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid2 = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid2 = nsblk->uuid;
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                uuid2 = nd_btt->uuid;
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                uuid2 = nd_pfn->uuid;
        }

        if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
                return -EBUSY;

        return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
        if (is_nd_pmem(dev) || is_nd_blk(dev))
                return device_for_each_child(dev, data, is_uuid_busy);
        return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
        if (device_for_each_child(&nvdimm_bus->dev, uuid,
                                is_namespace_uuid_busy) != 0)
                return false;
        return true;
}

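/**
 * pmem_should_map_pages - can this namespace be mapped with page structs?
 * @dev: namespace device to interrogate
 *
 * True only when ZONE_DEVICE is built in, the parent region sets
 * ND_REGION_PAGEMAP, @dev is a raw namespace (false for btt and pfn
 * devices), the range does not alias System RAM, and the arch
 * memremaps pmem write-back cacheable.
 */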
bool pmem_should_map_pages(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_io *nsio;

        if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
                return false;

        if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
                return false;

        if (is_nd_pfn(dev) || is_nd_btt(dev))
                return false;

        nsio = to_nd_namespace_io(dev);
        if (region_intersects(nsio->res.start, resource_size(&nsio->res),
                                IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED)
                return false;

#ifdef ARCH_MEMREMAP_PMEM
        return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
        return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);

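/**
 * nvdimm_namespace_disk_name - format the expected disk name for a namespace
 * @ndns: namespace to name
 * @name: caller-provided output buffer
 *
 * PMEM and IO namespaces resolve to "pmem<region-id>", BLK namespaces
 * to "ndblk<region-id>.<namespace-id>", with an "s" suffix appended
 * when the namespace is fronted by a btt instance.
 */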
const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name)
{
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        const char *suffix = NULL;

        if (ndns->claim && is_nd_btt(ndns->claim))
                suffix = "s";

        if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
                sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
                                suffix ? suffix : "");
        } else {
                return NULL;
        }

        return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
        static const u8 null_uuid[16];

        if (!dev)
                return null_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                return nsblk->uuid;
        } else
                return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
                const size_t len)
{
        char *input, *pos, *alt_name, **ns_altname;
        ssize_t rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = &nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = &nsblk->alt_name;
        } else
                return -ENXIO;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        input = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        input[len] = '\0';
        pos = strim(input);
        if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
                rc = -EINVAL;
                goto out;
        }

        alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
        if (!alt_name) {
                rc = -ENOMEM;
                goto out;
        }
        kfree(*ns_altname);
        *ns_altname = alt_name;
        sprintf(*ns_altname, "%s", pos);
        rc = len;

out:
        kfree(input);
        return rc;
}

static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        resource_size_t size = 0;
        struct resource *res;

        if (!nsblk->uuid)
                return 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        size += resource_size(res);
        return size;
}

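/**
 * __nd_namespace_blk_validate - check that labels cover the blk allocation
 * @nsblk: namespace to validate
 *
 * True when every dpa resource tagged with this namespace's label-id
 * has been acknowledged by a successful label update and the cached
 * nsblk->res[] array exactly matches the live resource tree.  Caller
 * must hold the nvdimm_bus lock.
 */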
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        struct resource *res;
        int count, i;

        if (!nsblk->uuid || !nsblk->lbasize || !ndd)
                return false;

        count = 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, label_id.id) != 0)
                        continue;
                /*
                 * Resources with unacknowledged adjustments indicate a
                 * failure to update labels
                 */
                if (res->flags & DPA_RESOURCE_ADJUSTED)
                        return false;
                count++;
        }

        /* These values match after a successful label update */
        if (count != nsblk->num_resources)
                return false;

        for (i = 0; i < nsblk->num_resources; i++) {
                struct resource *found = NULL;

                for_each_dpa_resource(ndd, res)
                        if (res == nsblk->res[i]) {
                                found = res;
                                break;
                        }
                /* stale resource */
                if (!found)
                        return false;
        }

        return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        resource_size_t size;

        nvdimm_bus_lock(&nsblk->common.dev);
        size = __nd_namespace_blk_validate(nsblk);
        nvdimm_bus_unlock(&nsblk->common.dev);

        return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
{
        dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
                        "namespace must be idle during label update\n");
        if (dev->driver || to_ndns(dev)->claim)
                return 0;

        /*
         * Only allow label writes that will result in a valid namespace
         * or deletion of an existing namespace.
         */
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
                resource_size_t size = resource_size(&nspm->nsio.res);

                if (size == 0 && nspm->uuid)
                        /* delete allocation */;
                else if (!nspm->uuid)
                        return 0;

                return nd_pmem_namespace_label_update(nd_region, nspm, size);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
                resource_size_t size = nd_namespace_blk_size(nsblk);

                if (size == 0 && nsblk->uuid)
                        /* delete allocation */;
                else if (!nsblk->uuid || !nsblk->lbasize)
                        return 0;

                return nd_blk_namespace_label_update(nd_region, nsblk, size);
        } else
                return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __alt_name_store(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        char *ns_altname;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = nsblk->alt_name;
        } else
                return -ENXIO;

        return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

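/**
 * scan_free - free @n bytes from the end of a label-id's dpa allocation
 * @nd_region: region containing @nd_mapping
 * @nd_mapping: dimm mapping to release capacity from
 * @label_id: namespace allocation to shrink
 * @n: bytes to release
 *
 * Walks the dpa resources for @label_id from highest to lowest,
 * deleting whole resources until less than a full resource remains,
 * then trims the final resource with adjust_resource().  BLK
 * allocations are trimmed from the low end so that they stay packed
 * at high DPA.
 */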
static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;

        while (n) {
                struct resource *res, *last;
                resource_size_t new_start;

                last = NULL;
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id->id) == 0)
                                last = res;
                res = last;
                if (!res)
                        return 0;

                if (n >= resource_size(res)) {
                        n -= resource_size(res);
                        nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
                        nvdimm_free_dpa(ndd, res);
                        /* retry with last resource deleted */
                        continue;
                }

                /*
                 * Keep BLK allocations relegated to high DPA as much as
                 * possible
                 */
                if (is_blk)
                        new_start = res->start + n;
                else
                        new_start = res->start;

                rc = adjust_resource(res, new_start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
                break;
        }

        return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                int rc;

                rc = scan_free(nd_region, nd_mapping, label_id, n);
                if (rc)
                        return rc;
        }

        return 0;
}

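/**
 * init_dpa_allocation - first dpa allocation for a label-id on a dimm
 * @label_id: namespace allocation to create
 * @nd_region: region containing @nd_mapping
 * @nd_mapping: dimm mapping to allocate from
 * @n: bytes to allocate
 *
 * BLK allocations are placed at the highest free dpa, PMEM allocations
 * at the base of the mapping.  Returns the bytes remaining to
 * allocate, i.e. 0 on success or @n on failure.
 */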
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t first_dpa;
        struct resource *res;
        int rc = 0;

        /* allocate blk from highest dpa first */
        if (is_blk)
                first_dpa = nd_mapping->start + nd_mapping->size - n;
        else
                first_dpa = nd_mapping->start;

        /* first resource allocation for this label-id or dimm */
        res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
        if (!res)
                rc = -EBUSY;

        nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
        return rc ? n : 0;
}

static bool space_valid(bool is_pmem, bool is_reserve,
                struct nd_label_id *label_id, struct resource *res)
{
        /*
         * For BLK-space any space is valid, for PMEM-space, it must be
         * contiguous with an existing allocation unless we are
         * reserving pmem.
         */
        if (is_reserve || !is_pmem)
                return true;
        if (!res || strcmp(res->name, label_id->id) == 0)
                return true;
        return false;
}

enum alloc_loc {
        ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

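/**
 * scan_allocate - satisfy an allocation request from free dpa
 * @nd_region: region containing @nd_mapping
 * @nd_mapping: dimm mapping to allocate from
 * @label_id: namespace allocation to grow
 * @n: bytes to allocate
 *
 * Scan the gaps before, between, and after the existing dpa resources
 * and either grow an adjacent resource belonging to @label_id or
 * insert a new one.  PMEM allocations must remain contiguous, so free
 * space that cannot extend the existing allocation fails the request.
 * Returns the number of bytes that could not be allocated.
 */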
static resource_size_t scan_allocate(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        const resource_size_t to_allocate = n;
        struct resource *res;
        int first;

 retry:
        first = 0;
        for_each_dpa_resource(ndd, res) {
                resource_size_t allocate, available = 0, free_start, free_end;
                struct resource *next = res->sibling, *new_res = NULL;
                enum alloc_loc loc = ALLOC_ERR;
                const char *action;
                int rc = 0;

                /* ignore resources outside this nd_mapping */
                if (res->start > mapping_end)
                        continue;
                if (res->end < nd_mapping->start)
                        continue;

                /* space at the beginning of the mapping */
                if (!first++ && res->start > nd_mapping->start) {
                        free_start = nd_mapping->start;
                        available = res->start - free_start;
                        if (space_valid(is_pmem, is_reserve, label_id, NULL))
                                loc = ALLOC_BEFORE;
                }

                /* space between allocations */
                if (!loc && next) {
                        free_start = res->start + resource_size(res);
                        free_end = min(mapping_end, next->start - 1);
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_MID;
                        }
                }

                /* space at the end of the mapping */
                if (!loc && !next) {
                        free_start = res->start + resource_size(res);
                        free_end = mapping_end;
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_AFTER;
                        }
                }

                if (!loc || !available)
                        continue;
                allocate = min(available, n);
                switch (loc) {
                case ALLOC_BEFORE:
                        if (strcmp(res->name, label_id->id) == 0) {
                                /* adjust current resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(res, res->start - allocate,
                                                resource_size(res) + allocate);
                                action = "cur grow up";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_MID:
                        if (strcmp(next->name, label_id->id) == 0) {
                                /* adjust next resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(next, next->start
                                                - allocate, resource_size(next)
                                                + allocate);
                                new_res = next;
                                action = "next grow up";
                        } else if (strcmp(res->name, label_id->id) == 0) {
                                action = "grow down";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_AFTER:
                        if (strcmp(res->name, label_id->id) == 0)
                                action = "grow down";
                        else
                                action = "allocate";
                        break;
                default:
                        return n;
                }

                if (strcmp(action, "allocate") == 0) {
                        /* BLK allocate bottom up */
                        if (!is_pmem)
                                free_start += available - allocate;
                        else if (!is_reserve && free_start != nd_mapping->start)
                                return n;

                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        free_start, allocate);
                        if (!new_res)
                                rc = -EBUSY;
                } else if (strcmp(action, "grow down") == 0) {
                        /* adjust current resource down */
                        rc = adjust_resource(res, res->start, resource_size(res)
                                        + allocate);
                        if (rc == 0)
                                res->flags |= DPA_RESOURCE_ADJUSTED;
                }

                if (!new_res)
                        new_res = res;

                nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                                action, loc, rc);

                if (rc)
                        return n;

                n -= allocate;
                if (n) {
                        /*
                         * Retry scan with newly inserted resources.
                         * For example, if we did an ALLOC_BEFORE
                         * insertion there may also have been space
                         * available for an ALLOC_AFTER insertion, so we
                         * need to check this same resource again
                         */
                        goto retry;
                } else
                        return 0;
        }

        /*
         * If we allocated nothing in the BLK case it may be because we are in
         * an initial "pmem-reserve pass".  Only do an initial BLK allocation
         * when none of the DPA space is reserved.
         */
        if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
        return n;
}

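/**
 * merge_dpa - coalesce adjacent dpa resources with the same label-id
 * @nd_region: region containing @nd_mapping
 * @nd_mapping: dimm mapping to scan
 * @label_id: allocation whose resources should be merged
 *
 * After a grow operation a BLK allocation may be represented by
 * several abutting resources; fold each such pair into one so the
 * label count reflects the actual extents.  PMEM allocations are
 * already contiguous and are skipped.
 */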
static int merge_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        if (strncmp("pmem", label_id->id, 4) == 0)
                return 0;
 retry:
        for_each_dpa_resource(ndd, res) {
                int rc;
                struct resource *next = res->sibling;
                resource_size_t end = res->start + resource_size(res);

                if (!next || strcmp(res->name, label_id->id) != 0
                                || strcmp(next->name, label_id->id) != 0
                                || end != next->start)
                        continue;
                end += resource_size(next);
                nvdimm_free_dpa(ndd, next);
                rc = adjust_resource(res, res->start, end - res->start);
                nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
                if (rc)
                        return rc;
                res->flags |= DPA_RESOURCE_ADJUSTED;
                goto retry;
        }

        return 0;
}

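/*
 * Tag all unallocated pmem capacity on @data (an nvdimm) as
 * "pmem-reserve" so that a subsequent blk-only allocation pass cannot
 * encroach on space that may still be needed for pmem.
 */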
static int __reserve_free_pmem(struct device *dev, void *data)
{
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
        struct nd_label_id label_id;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region->ndr_mappings == 0)
                return 0;

        memset(&label_id, 0, sizeof(label_id));
        strcat(label_id.id, "pmem-reserve");
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t n, rem = 0;

                if (nd_mapping->nvdimm != nvdimm)
                        continue;

                n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "pmem reserve underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                return rem ? -ENXIO : 0;
        }

        return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *_res;

        for_each_dpa_resource_safe(ndd, res, _res)
                if (strcmp(res->name, "pmem-reserve") == 0)
                        nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
        int rc;

        rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
                        __reserve_free_pmem);
        if (rc)
                release_free_pmem(nvdimm_bus, nd_mapping);
        return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
                int rc, j;

                /*
                 * In the BLK case try once with all unallocated PMEM
                 * reserved, and once without
                 */
                for (j = is_pmem; j < 2; j++) {
                        bool blk_only = j == 0;

                        if (blk_only) {
                                rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
                                if (rc)
                                        return rc;
                        }
                        rem = scan_allocate(nd_region, nd_mapping,
                                        label_id, rem);
                        if (blk_only)
                                release_free_pmem(nvdimm_bus, nd_mapping);

                        /* try again and allow encroachments into PMEM */
                        if (rem == 0)
                                break;
                }

                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                if (rem)
                        return -ENXIO;

                rc = merge_dpa(nd_region, nd_mapping, label_id);
                if (rc)
                        return rc;
        }

        return 0;
}

static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size)
{
        struct resource *res = &nspm->nsio.res;

        res->start = nd_region->ndr_start;
        res->end = nd_region->ndr_start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
        if (!uuid) {
                dev_dbg(dev, "%s: uuid not set\n", where);
                return true;
        }
        return false;
}

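/*
 * Resize a namespace by dividing the requested capacity evenly across
 * every dimm in the interleave set and growing or shrinking the
 * per-dimm label-id allocation to match.  Caller must hold the device
 * and nvdimm_bus locks.
 */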
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
        u8 *uuid = NULL;
        int rc, i;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        /*
         * We need a uuid for the allocation-label and dimm(s) on which
         * to store the label.
         */
        if (uuid_not_set(uuid, dev, __func__))
                return -ENXIO;
        if (nd_region->ndr_mappings == 0) {
                dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
                return -ENXIO;
        }

        div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
        if (remainder) {
                dev_dbg(dev, "%llu is not %dK aligned\n", val,
                                (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
                return -EINVAL;
        }

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                ndd = to_ndd(nd_mapping);

                /*
                 * All dimms in an interleave set, or the base dimm for a blk
                 * region, need to be enabled for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;

                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
        available = nd_region_available_dpa(nd_region);

        if (val > available + allocated)
                return -ENOSPC;

        if (val == allocated)
                return 0;

        val = div_u64(val, nd_region->ndr_mappings);
        allocated = div_u64(allocated, nd_region->ndr_mappings);
        if (val < allocated)
                rc = shrink_dpa_allocation(nd_region, &label_id,
                                allocated - val);
        else
                rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

        if (rc)
                return rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                nd_namespace_pmem_set_size(nd_region, nspm,
                                val * nd_region->ndr_mappings);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                /*
                 * Try to delete the namespace if we deleted all of its
                 * allocation, this is not the seed device for the
                 * region, and it is not actively claimed by a btt
                 * instance.
                 */
                if (val == 0 && nd_region->ns_seed != dev
                                && !nsblk->common.claim)
                        nd_device_unregister(dev, ND_ASYNC);
        }

        return rc;
}

static ssize_t size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        unsigned long long val;
        u8 **uuid = NULL;
        int rc;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __size_store(dev, val);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = &nsblk->uuid;
        }

        if (rc == 0 && val == 0 && uuid) {
                /* setting size zero == 'delete namespace' */
                kfree(*uuid);
                *uuid = NULL;
        }

        dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
                        ? "fail" : "success", rc);

        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        struct device *dev = &ndns->dev;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return resource_size(&nspm->nsio.res);
        } else if (is_namespace_blk(dev)) {
                return nd_namespace_blk_size(to_nd_namespace_blk(dev));
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                return resource_size(&nsio->res);
        } else
                WARN_ONCE(1, "unknown namespace type\n");
        return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        resource_size_t size;

        nvdimm_bus_lock(&ndns->dev);
        size = __nvdimm_namespace_capacity(ndns);
        nvdimm_bus_unlock(&ndns->dev);

        return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)
                        nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);

static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u8 *uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
        } else
                return -ENXIO;

        if (uuid)
                return sprintf(buf, "%pUb\n", uuid);
        return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
        u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;

        if (!nd_is_uuid_unique(dev, new_uuid))
                return -EINVAL;

        if (*old_uuid == NULL)
                goto out;

        /*
         * If we've already written a label with this uuid, then it's
         * too late to rename because we can't reliably update the uuid
         * without losing the old namespace.  Userspace must delete this
         * namespace to abandon the old uuid.
         */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                /*
                 * This check by itself is sufficient because old_uuid
                 * would be NULL above if this uuid did not exist in the
                 * currently written set.
                 *
                 * FIXME: can we delete uuid with zero dpa allocated?
                 */
                if (list_empty(&nd_mapping->labels))
                        return -EBUSY;
        }

        nd_label_gen_id(&old_label_id, *old_uuid, flags);
        nd_label_gen_id(&new_label_id, new_uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, old_label_id.id) == 0)
                                sprintf((void *) res->name, "%s",
                                                new_label_id.id);
        }
        kfree(*old_uuid);
 out:
        *old_uuid = new_uuid;
        return 0;
}

static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        u8 *uuid = NULL;
        ssize_t rc = 0;
        u8 **ns_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_uuid = &nsblk->uuid;
        } else
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_uuid_store(dev, &uuid, buf, len);
        if (rc >= 0)
                rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        else
                kfree(uuid);
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct resource *res;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                res = &nspm->nsio.res;
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                res = &nsio->res;
        } else
                return -ENXIO;

        /* no address to convey if the namespace has no allocation */
        if (resource_size(res) == 0)
                return -ENXIO;
        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
        4096, 4104, 4160, 4224, 0 };

static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

        if (!is_namespace_blk(dev))
                return -ENXIO;

        return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc = 0;

        if (!is_namespace_blk(dev))
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
                                ns_lbasize_supported);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
                        rc, rc < 0 ? "tried" : "wrote", buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_label_id label_id;
        int count = 0, i;
        u8 *uuid = NULL;
        u32 flags = 0;

        nvdimm_bus_lock(dev);
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
                flags = 0;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        if (!uuid)
                goto out;

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0)
                                count++;
        }
 out:
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        ssize_t rc;

        device_lock(dev);
        rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct device *claim;
        char *mode;
        ssize_t rc;

        device_lock(dev);
        claim = ndns->claim;
        if (claim && is_nd_btt(claim))
                mode = "safe";
        else if (claim && is_nd_pfn(claim))
                mode = "memory";
        else if (claim && is_nd_dax(claim))
                mode = "dax";
        else if (!claim && pmem_should_map_pages(dev))
                mode = "memory";
        else
                mode = "raw";
        rc = sprintf(buf, "%s\n", mode);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool force_raw;
        int rc = strtobool(buf, &force_raw);

        if (rc)
                return rc;

        to_ndns(dev)->force_raw = force_raw;
        return len;
}

static ssize_t force_raw_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
        &dev_attr_nstype.attr,
        &dev_attr_size.attr,
        &dev_attr_mode.attr,
        &dev_attr_uuid.attr,
        &dev_attr_holder.attr,
        &dev_attr_resource.attr,
        &dev_attr_alt_name.attr,
        &dev_attr_force_raw.attr,
        &dev_attr_sector_size.attr,
        &dev_attr_dpa_extents.attr,
        NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (a == &dev_attr_resource.attr) {
                if (is_namespace_blk(dev))
                        return 0;
                return a->mode;
        }

        if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
                if (a == &dev_attr_size.attr)
                        return S_IWUSR | S_IRUGO;

                if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
                        return 0;

                return a->mode;
        }

        if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
                        || a == &dev_attr_holder.attr
                        || a == &dev_attr_force_raw.attr
                        || a == &dev_attr_mode.attr)
                return a->mode;

        return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
        .attrs = nd_namespace_attributes,
        .is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_namespace_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

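/**
 * nvdimm_namespace_common_probe - resolve and validate the backing namespace
 * @dev: namespace device, or a btt/pfn/dax device claiming a namespace
 *
 * Returns the namespace to attach, or an ERR_PTR() when the namespace
 * is absent, too small, still active in another driver, or missing
 * required configuration (uuid, sector size, valid labels).
 */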
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
        struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
        struct nd_namespace_common *ndns = NULL;
        resource_size_t size;

        if (nd_btt || nd_pfn || nd_dax) {
                if (nd_btt)
                        ndns = nd_btt->ndns;
                else if (nd_pfn)
                        ndns = nd_pfn->ndns;
                else if (nd_dax)
                        ndns = nd_dax->nd_pfn.ndns;

                if (!ndns)
                        return ERR_PTR(-ENODEV);

                /*
                 * Flush any in-progress probes / removals in the driver
                 * for the raw personality of this namespace.
                 */
                device_lock(&ndns->dev);
                device_unlock(&ndns->dev);
                if (ndns->dev.driver) {
                        dev_dbg(&ndns->dev, "is active, can't bind %s\n",
                                        dev_name(dev));
                        return ERR_PTR(-EBUSY);
                }
                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
                                        "host (%s) vs claim (%s) mismatch\n",
                                        dev_name(dev),
                                        dev_name(ndns->claim)))
                        return ERR_PTR(-ENXIO);
        } else {
                ndns = to_ndns(dev);
                if (ndns->claim) {
                        dev_dbg(dev, "claimed by %s, failing probe\n",
                                dev_name(ndns->claim));

                        return ERR_PTR(-ENXIO);
                }
        }

        size = nvdimm_namespace_capacity(ndns);
        if (size < ND_MIN_NAMESPACE_SIZE) {
                dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
                                &size, ND_MIN_NAMESPACE_SIZE);
                return ERR_PTR(-ENODEV);
        }

        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
                if (!nsblk->lbasize) {
                        dev_dbg(&ndns->dev, "%s: sector size not set\n",
                                __func__);
                        return ERR_PTR(-ENODEV);
                }
                if (!nd_namespace_blk_validate(nsblk))
                        return ERR_PTR(-ENODEV);
        }

        return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

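/*
 * Register a single io-namespace spanning the whole region for
 * label-less dimms; returns a NULL-terminated device array owned by
 * the caller.
 */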
static struct device **create_namespace_io(struct nd_region *nd_region)
{
        struct nd_namespace_io *nsio;
        struct device *dev, **devs;
        struct resource *res;

        nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
        if (!nsio)
                return NULL;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs) {
                kfree(nsio);
                return NULL;
        }

        dev = &nsio->common.dev;
        dev->type = &namespace_io_device_type;
        dev->parent = &nd_region->dev;
        res = &nsio->res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        res->start = nd_region->ndr_start;
        res->end = res->start + nd_region->ndr_size - 1;

        devs[0] = dev;
        return devs;
}

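/*
 * Check that a label with @uuid and interleave-set @cookie exists at
 * interleave position @pos on one of the dimms in the region, and
 * that no dimm carries a duplicate entry for that uuid.
 */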
1487 static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1488                 u64 cookie, u16 pos)
1489 {
1490         struct nd_namespace_label *found = NULL;
1491         int i;
1492
1493         for (i = 0; i < nd_region->ndr_mappings; i++) {
1494                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1495                 struct nd_label_ent *label_ent;
1496                 bool found_uuid = false;
1497
1498                 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1499                         struct nd_namespace_label *nd_label = label_ent->label;
1500                         u16 position, nlabel;
1501                         u64 isetcookie;
1502
1503                         if (!nd_label)
1504                                 continue;
1505                         isetcookie = __le64_to_cpu(nd_label->isetcookie);
1506                         position = __le16_to_cpu(nd_label->position);
1507                         nlabel = __le16_to_cpu(nd_label->nlabel);
1508
1509                         if (isetcookie != cookie)
1510                                 continue;
1511
1512                         if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1513                                 continue;
1514
1515                         if (found_uuid) {
1516                                 dev_dbg(to_ndd(nd_mapping)->dev,
1517                                                 "%s duplicate entry for uuid\n",
1518                                                 __func__);
1519                                 return false;
1520                         }
1521                         found_uuid = true;
1522                         if (nlabel != nd_region->ndr_mappings)
1523                                 continue;
1524                         if (position != pos)
1525                                 continue;
1526                         found = nd_label;
1527                         break;
1528                 }
1529                 if (found)
1530                         break;
1531         }
1532         return found != NULL;
1533 }
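
/*
 * Worked example: in a 2-way interleave set (ndr_mappings == 2) a
 * complete pmem label set is one label per dimm carrying the same
 * uuid and isetcookie, each with nlabel == 2, occupying positions 0
 * and 1.  has_uuid_at_pos() must succeed for every pos in
 * [0, ndr_mappings) before the set is considered whole.
 */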
1534
1535 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1536 {
1537         int i;
1538
1539         if (!pmem_id)
1540                 return -ENODEV;
1541
1542         for (i = 0; i < nd_region->ndr_mappings; i++) {
1543                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1544                 struct nd_namespace_label *nd_label = NULL;
1545                 u64 hw_start, hw_end, pmem_start, pmem_end;
1546                 struct nd_label_ent *label_ent;
1547
1548                 mutex_lock(&nd_mapping->lock);
1549                 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1550                         nd_label = label_ent->label;
1551                         if (!nd_label)
1552                                 continue;
1553                         if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1554                                 break;
1555                         nd_label = NULL;
1556                 }
1557                 mutex_unlock(&nd_mapping->lock);
1558
1559                 if (!nd_label) {
1560                         WARN_ON(1);
1561                         return -EINVAL;
1562                 }
1563
1564                 /*
1565                  * Check that this label is compliant with the dpa
1566                  * range published in NFIT
1567                  */
1568                 hw_start = nd_mapping->start;
1569                 hw_end = hw_start + nd_mapping->size;
1570                 pmem_start = __le64_to_cpu(nd_label->dpa);
1571                 pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
1572                 if (pmem_start == hw_start && pmem_end <= hw_end)
1573                         /* pass */;
1574                 else
1575                         return -EINVAL;
1576
1577                 mutex_lock(&nd_mapping->lock);
1578                 label_ent = list_first_entry(&nd_mapping->labels,
1579                                 typeof(*label_ent), list);
1580                 label_ent->label = nd_label;
1581                 list_del(&label_ent->list);
1582                 nd_mapping_free_labels(nd_mapping);
1583                 list_add(&label_ent->list, &nd_mapping->labels);
1584                 mutex_unlock(&nd_mapping->lock);
1585         }
1586         return 0;
1587 }
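
/*
 * After select_pmem_id() succeeds, each mapping's 'labels' list has
 * been reduced to a single entry: the first nd_label_ent is recycled
 * to carry the matching label, deleted from the list, the remaining
 * entries are freed via nd_mapping_free_labels(), and the survivor is
 * re-added as the sole member.  Roughly:
 *
 *      [ent0|lblA] -> [ent1|lblB] -> ...  becomes  [ent0|matched lbl]
 */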
1588
1589 /**
1590  * find_pmem_label_set - validate interleave set labelling, retrieve label0
1591  * @nd_region: region with mappings to validate
 * @nspm: namespace to populate from the validated set's label0 (name, uuid, size)
1592  */
1593 static int find_pmem_label_set(struct nd_region *nd_region,
1594                 struct nd_namespace_pmem *nspm)
1595 {
1596         u64 cookie = nd_region_interleave_set_cookie(nd_region);
1597         u8 select_id[NSLABEL_UUID_LEN];
1598         struct nd_label_ent *label_ent;
1599         struct nd_mapping *nd_mapping;
1600         resource_size_t size = 0;
1601         u8 *pmem_id = NULL;
1602         int rc = 0;
1603         u16 i;
1604
1605         if (cookie == 0) {
1606                 dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
1607                 return -ENXIO;
1608         }
1609
1610         /*
1611          * Find a complete set of labels by uuid.  By definition we can start
1612          * with any mapping as the reference label
1613          */
1614         for (i = 0; i < nd_region->ndr_mappings; i++) {
1615                 nd_mapping = &nd_region->mapping[i];
1616                 mutex_lock_nested(&nd_mapping->lock, i);
1617         }
1618         list_for_each_entry(label_ent, &nd_region->mapping[0].labels, list) {
1619                 struct nd_namespace_label *nd_label = label_ent->label;
1620
1621                 if (!nd_label)
1622                         continue;
1623                 if (__le64_to_cpu(nd_label->isetcookie) != cookie)
1624                         continue;
1625
1626                 for (i = 0; i < nd_region->ndr_mappings; i++)
1627                         if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1628                                                 cookie, i))
1629                                 break;
1630                 if (i < nd_region->ndr_mappings) {
1631                         /*
1632                          * Give up if we don't find an instance of a
1633                          * uuid at each position (from 0 to
1634                          * nd_region->ndr_mappings - 1), or if we find a
1635                          * dimm with two instances of the same uuid.
1636                          */
1637                         rc = -EINVAL;
1638                         break;
1639                 } else if (pmem_id) {
1640                         /*
1641                          * If there is more than one valid uuid set, we
1642                          * need userspace to clean this up.
1643                          */
1644                         rc = -EBUSY;
1645                         break;
1646                 }
1647                 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1648                 pmem_id = select_id;
1649         }
1650         for (i = 0; i < nd_region->ndr_mappings; i++) {
1651                 int reverse = nd_region->ndr_mappings - 1 - i;
1652
1653                 nd_mapping = &nd_region->mapping[reverse];
1654                 mutex_unlock(&nd_mapping->lock);
1655         }
1656
1657         if (rc)
1658                 goto err;
1659
1660         /*
1661          * Fix up each mapping's 'labels' to have the validated pmem label for
1662          * that position at labels[0], and NULL at labels[1].  In the process,
1663          * check that the namespace aligns with interleave-set.  We know
1664          * that it does not overlap with any blk namespaces by virtue of
1665          * the dimm being enabled (i.e. nd_label_reserve_dpa()
1666          * succeeded).
1667          */
1668         rc = select_pmem_id(nd_region, pmem_id);
1669         if (rc)
1670                 goto err;
1671
1672         /* Calculate total size and populate namespace properties from label0 */
1673         for (i = 0; i < nd_region->ndr_mappings; i++) {
1674                 struct nd_namespace_label *label0;
1675
1676                 nd_mapping = &nd_region->mapping[i];
1677                 mutex_lock(&nd_mapping->lock);
1678                 label_ent = list_first_entry_or_null(&nd_mapping->labels,
1679                                 typeof(*label_ent), list);
1680                 label0 = label_ent ? label_ent->label : NULL;
1681                 mutex_unlock(&nd_mapping->lock);
1682
1683                 if (!label0) {
1684                         WARN_ON(1);
1685                         continue;
1686                 }
1687
1688                 size += __le64_to_cpu(label0->rawsize);
1689                 if (__le16_to_cpu(label0->position) != 0)
1690                         continue;
1691                 WARN_ON(nspm->alt_name || nspm->uuid);
1692                 nspm->alt_name = kmemdup((void __force *) label0->name,
1693                                 NSLABEL_NAME_LEN, GFP_KERNEL);
1694                 nspm->uuid = kmemdup((void __force *) label0->uuid,
1695                                 NSLABEL_UUID_LEN, GFP_KERNEL);
1696         }
1697
1698         if (!nspm->alt_name || !nspm->uuid) {
1699                 rc = -ENOMEM;
1700                 goto err;
1701         }
1702
1703         nd_namespace_pmem_set_size(nd_region, nspm, size);
1704
1705         return 0;
1706  err:
1707         switch (rc) {
1708         case -EINVAL:
1709                 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1710                 break;
1711         case -ENODEV:
1712                 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1713                 break;
1714         default:
1715                 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1716                                 __func__, rc);
1717                 break;
1718         }
1719         return rc;
1720 }
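
/*
 * Locking note: find_pmem_label_set() acquires every mapping lock up
 * front with mutex_lock_nested(&nd_mapping->lock, i), giving lockdep
 * a distinct subclass per mapping so that holding multiple locks of
 * the same class is not flagged as a self-deadlock; the locks are
 * then dropped in reverse order.
 */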
1721
1722 static struct device **create_namespace_pmem(struct nd_region *nd_region)
1723 {
1724         struct nd_namespace_pmem *nspm;
1725         struct device *dev, **devs;
1726         struct resource *res;
1727         int rc;
1728
1729         nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1730         if (!nspm)
1731                 return NULL;
1732
1733         dev = &nspm->nsio.common.dev;
1734         dev->type = &namespace_pmem_device_type;
1735         dev->parent = &nd_region->dev;
1736         res = &nspm->nsio.res;
1737         res->name = dev_name(&nd_region->dev);
1738         res->flags = IORESOURCE_MEM;
1739         rc = find_pmem_label_set(nd_region, nspm);
1740         if (rc == -ENODEV) {
1741                 int i;
1742
1743                 /* Pass, try to permit namespace creation... */
1744                 for (i = 0; i < nd_region->ndr_mappings; i++) {
1745                         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1746
1747                         mutex_lock(&nd_mapping->lock);
1748                         nd_mapping_free_labels(nd_mapping);
1749                         mutex_unlock(&nd_mapping->lock);
1750                 }
1751
1752                 /* Publish a zero-sized namespace for userspace to configure. */
1753                 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1754
1755                 rc = 0;
1756         } else if (rc)
1757                 goto err;
1758
1759         devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1760         if (!devs)
1761                 goto err;
1762
1763         devs[0] = dev;
1764         return devs;
1765
1766  err:
1767         namespace_pmem_release(&nspm->nsio.common.dev);
1768         return NULL;
1769 }
1770
1771 struct resource *nsblk_add_resource(struct nd_region *nd_region,
1772                 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1773                 resource_size_t start)
1774 {
1775         struct nd_label_id label_id;
1776         struct resource *res;
1777
1778         nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1779         res = krealloc(nsblk->res,
1780                         sizeof(void *) * (nsblk->num_resources + 1),
1781                         GFP_KERNEL);
1782         if (!res)
1783                 return NULL;
1784         nsblk->res = (struct resource **) res;
1785         for_each_dpa_resource(ndd, res)
1786                 if (strcmp(res->name, label_id.id) == 0
1787                                 && res->start == start) {
1788                         nsblk->res[nsblk->num_resources++] = res;
1789                         return res;
1790                 }
1791         return NULL;
1792 }
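
/*
 * Sketch of how nsblk_add_resource() is keyed (assuming, per
 * dimm_devs.c, that nd_label_gen_id() formats local label ids as
 * "blk-<uuid>"): nsblk->res grows as a krealloc()'d array of pointers
 * into the dimm's dpa resource tree, and a dpa resource belongs to
 * this namespace when both fields match:
 *
 *      strcmp(res->name, "blk-<uuid>") == 0 && res->start == start
 */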
1793
1794 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1795 {
1796         struct nd_namespace_blk *nsblk;
1797         struct device *dev;
1798
1799         if (!is_nd_blk(&nd_region->dev))
1800                 return NULL;
1801
1802         nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1803         if (!nsblk)
1804                 return NULL;
1805
1806         dev = &nsblk->common.dev;
1807         dev->type = &namespace_blk_device_type;
1808         nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1809         if (nsblk->id < 0) {
1810                 kfree(nsblk);
1811                 return NULL;
1812         }
1813         dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1814         dev->parent = &nd_region->dev;
1815         dev->groups = nd_namespace_attribute_groups;
1816
1817         return &nsblk->common.dev;
1818 }
1819
1820 void nd_region_create_blk_seed(struct nd_region *nd_region)
1821 {
1822         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1823         nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1824         /*
1825          * Seed creation failures are not fatal; provisioning is simply
1826          * disabled until memory becomes available
1827          */
1828         if (!nd_region->ns_seed)
1829                 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1830         else
1831                 nd_device_register(nd_region->ns_seed);
1832 }
1833
1834 void nd_region_create_dax_seed(struct nd_region *nd_region)
1835 {
1836         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1837         nd_region->dax_seed = nd_dax_create(nd_region);
1838         /*
1839          * Seed creation failures are not fatal; provisioning is simply
1840          * disabled until memory becomes available
1841          */
1842         if (!nd_region->dax_seed)
1843                 dev_err(&nd_region->dev, "failed to create dax namespace\n");
1844 }
1845
1846 void nd_region_create_pfn_seed(struct nd_region *nd_region)
1847 {
1848         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1849         nd_region->pfn_seed = nd_pfn_create(nd_region);
1850         /*
1851          * Seed creation failures are not fatal; provisioning is simply
1852          * disabled until memory becomes available
1853          */
1854         if (!nd_region->pfn_seed)
1855                 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1856 }
1857
1858 void nd_region_create_btt_seed(struct nd_region *nd_region)
1859 {
1860         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1861         nd_region->btt_seed = nd_btt_create(nd_region);
1862         /*
1863          * Seed creation failures are not fatal; provisioning is simply
1864          * disabled until memory becomes available
1865          */
1866         if (!nd_region->btt_seed)
1867                 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1868 }
1869
1870 static struct device **scan_labels(struct nd_region *nd_region,
1871                 struct nd_mapping *nd_mapping)
1872 {
1873         struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1874         struct device *dev, **devs = NULL;
1875         struct nd_namespace_blk *nsblk;
1876         struct nd_label_ent *label_ent;
1877         int i, count = 0;
1878
1879         list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1880                 struct nd_namespace_label *nd_label = label_ent->label;
1881                 char name[NSLABEL_NAME_LEN];
1882                 struct device **__devs;
1883                 struct resource *res;
1884                 u32 flags;
1885
1886                 if (!nd_label)
1887                         continue;
1888                 flags = __le32_to_cpu(nd_label->flags);
1889                 if (flags & NSLABEL_FLAG_LOCAL)
1890                         /* pass */;
1891                 else
1892                         continue;
1893
1894                 for (i = 0; i < count; i++) {
1895                         nsblk = to_nd_namespace_blk(devs[i]);
1896                         if (memcmp(nsblk->uuid, nd_label->uuid,
1897                                                 NSLABEL_UUID_LEN) == 0) {
1898                                 res = nsblk_add_resource(nd_region, ndd, nsblk,
1899                                                 __le64_to_cpu(nd_label->dpa));
1900                                 if (!res)
1901                                         goto err;
1902                                 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1903                                         dev_name(&nsblk->common.dev));
1904                                 break;
1905                         }
1906                 }
1907                 if (i < count)
1908                         continue;
1909                 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1910                 if (!__devs)
1911                         goto err;
1912                 memcpy(__devs, devs, sizeof(dev) * count);
1913                 kfree(devs);
1914                 devs = __devs;
1915
1916                 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1917                 if (!nsblk)
1918                         goto err;
1919                 dev = &nsblk->common.dev;
1920                 dev->type = &namespace_blk_device_type;
1921                 dev->parent = &nd_region->dev;
1922                 dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
1923                 devs[count++] = dev;
1924                 nsblk->id = -1;
1925                 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1926                 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1927                                 GFP_KERNEL);
1928                 if (!nsblk->uuid)
1929                         goto err;
1930                 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1931                 if (name[0])
1932                         nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1933                                         GFP_KERNEL);
1934                 res = nsblk_add_resource(nd_region, ndd, nsblk,
1935                                 __le64_to_cpu(nd_label->dpa));
1936                 if (!res)
1937                         goto err;
1938                 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1939                                 dev_name(&nsblk->common.dev));
1940         }
1941
1942         dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
1943                         __func__, count, count == 1 ? "" : "s");
1944
1945         if (count == 0) {
1946                 /* Publish a zero-sized namespace for userspace to configure. */
1947                 nd_mapping_free_labels(nd_mapping);
1948
1949                 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1950                 if (!devs)
1951                         goto err;
1952                 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1953                 if (!nsblk)
1954                         goto err;
1955                 dev = &nsblk->common.dev;
1956                 dev->type = &namespace_blk_device_type;
1957                 dev->parent = &nd_region->dev;
                nsblk->id = -1; /* real id assigned at registration time */
1958                 devs[count++] = dev;
1959         }
1960
1961         return devs;
1962
1963  err:
1964         for (i = 0; devs && devs[i]; i++) {
1965                 nsblk = to_nd_namespace_blk(devs[i]);
1966                 namespace_blk_release(&nsblk->common.dev);
1967         }
1968         kfree(devs);
1969         return NULL;
1970 }
1971
1972 static struct device **create_namespace_blk(struct nd_region *nd_region)
1973 {
1974         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1975         struct device **devs;
1976
1977         if (nd_region->ndr_mappings == 0)
1978                 return NULL;
1979
1980         mutex_lock(&nd_mapping->lock);
1981         devs = scan_labels(nd_region, nd_mapping);
1982         mutex_unlock(&nd_mapping->lock);
1983
1984         return devs;
1985 }
1986
1987 static int init_active_labels(struct nd_region *nd_region)
1988 {
1989         int i;
1990
1991         for (i = 0; i < nd_region->ndr_mappings; i++) {
1992                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1993                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1994                 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1995                 struct nd_label_ent *label_ent;
1996                 int count, j;
1997
1998                 /*
1999                  * If the dimm is disabled then prevent the region from
2000                  * being activated if it aliases DPA.
2001                  */
2002                 if (!ndd) {
2003                         if ((nvdimm->flags & NDD_ALIASING) == 0)
2004                                 return 0;
2005                         dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
2006                                         dev_name(&nd_mapping->nvdimm->dev));
2007                         return -ENXIO;
2008                 }
2009                 nd_mapping->ndd = ndd;
2010                 atomic_inc(&nvdimm->busy);
2011                 get_ndd(ndd);
2012
2013                 count = nd_label_active_count(ndd);
2014                 dev_dbg(ndd->dev, "%s: %d active labels\n", __func__, count);
2015                 if (!count)
2016                         continue;
2017                 for (j = 0; j < count; j++) {
2018                         struct nd_namespace_label *label;
2019
2020                         label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
2021                         if (!label_ent)
2022                                 break;
2023                         label = nd_label_active(ndd, j);
2024                         label_ent->label = label;
2025
2026                         mutex_lock(&nd_mapping->lock);
2027                         list_add_tail(&label_ent->list, &nd_mapping->labels);
2028                         mutex_unlock(&nd_mapping->lock);
2029                 }
2030
2031                 if (j >= count)
2032                         continue;
2033
2034                 mutex_lock(&nd_mapping->lock);
2035                 nd_mapping_free_labels(nd_mapping);
2036                 mutex_unlock(&nd_mapping->lock);
2037                 return -ENOMEM;
2038         }
2039
2040         return 0;
2041 }
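
/*
 * For reference, the list node this conversion introduces (defined in
 * nd-core.h) simply pairs a list_head with a label pointer:
 *
 *      struct nd_label_ent {
 *              struct list_head list;
 *              struct nd_namespace_label *label;
 *      };
 *
 * A NULL ->label is a valid placeholder, which is why every list walk
 * above skips entries with a NULL label.
 */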
2042
2043 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
2044 {
2045         struct device **devs = NULL;
2046         int i, rc = 0, type;
2047
2048         *err = 0;
2049         nvdimm_bus_lock(&nd_region->dev);
2050         rc = init_active_labels(nd_region);
2051         if (rc) {
2052                 nvdimm_bus_unlock(&nd_region->dev);
2053                 return rc;
2054         }
2055
2056         type = nd_region_to_nstype(nd_region);
2057         switch (type) {
2058         case ND_DEVICE_NAMESPACE_IO:
2059                 devs = create_namespace_io(nd_region);
2060                 break;
2061         case ND_DEVICE_NAMESPACE_PMEM:
2062                 devs = create_namespace_pmem(nd_region);
2063                 break;
2064         case ND_DEVICE_NAMESPACE_BLK:
2065                 devs = create_namespace_blk(nd_region);
2066                 break;
2067         default:
2068                 break;
2069         }
2070         nvdimm_bus_unlock(&nd_region->dev);
2071
2072         if (!devs)
2073                 return -ENODEV;
2074
2075         for (i = 0; devs[i]; i++) {
2076                 struct device *dev = devs[i];
2077                 int id;
2078
2079                 if (type == ND_DEVICE_NAMESPACE_BLK) {
2080                         struct nd_namespace_blk *nsblk;
2081
2082                         nsblk = to_nd_namespace_blk(dev);
2083                         id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2084                                         GFP_KERNEL);
2085                         nsblk->id = id;
2086                 } else
2087                         id = i;
2088
2089                 if (id < 0)
2090                         break;
2091                 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2092                 dev->groups = nd_namespace_attribute_groups;
2093                 nd_device_register(dev);
2094         }
2095         if (i)
2096                 nd_region->ns_seed = devs[0];
2097
2098         if (devs[i]) {
2099                 int j;
2100
2101                 for (j = i; devs[j]; j++) {
2102                         struct device *dev = devs[j];
2103
2104                         device_initialize(dev);
2105                         put_device(dev);
2106                 }
2107                 *err = j - i;
2108                 /*
2109                  * All of the namespaces we tried to register failed, so
2110                  * fail region activation.
2111                  */
2112                 if (i == 0)
2113                         rc = -ENODEV;
2114         }
2115         kfree(devs);
2116
2117         if (rc == -ENODEV)
2118                 return rc;
2119
2120         return i;
2121 }
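
/*
 * Illustrative sketch (hypothetical caller, modeled on the region
 * driver): the return value is the number of namespaces registered,
 * and partial failures are reported back through @err.
 */
static int __maybe_unused example_region_probe(struct nd_region *nd_region)
{
        int err, rc;

        rc = nd_region_register_namespaces(nd_region, &err);
        if (rc < 0)
                return rc;
        if (err)
                dev_dbg(&nd_region->dev, "%d namespace%s failed to register\n",
                                err, err == 1 ? "" : "s");
        return 0;
}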