drivers/nvdimm/namespace_devs.c
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/slab.h>
#include <linux/pmem.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
        struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

        kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
        struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

        kfree(nspm->alt_name);
        kfree(nspm->uuid);
        kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);

        if (nsblk->id >= 0)
                ida_simple_remove(&nd_region->ns_ida, nsblk->id);
        kfree(nsblk->alt_name);
        kfree(nsblk->uuid);
        kfree(nsblk->res);
        kfree(nsblk);
}

static struct device_type namespace_io_device_type = {
        .name = "nd_namespace_io",
        .release = namespace_io_release,
};

static struct device_type namespace_pmem_device_type = {
        .name = "nd_namespace_pmem",
        .release = namespace_pmem_release,
};

static struct device_type namespace_blk_device_type = {
        .name = "nd_namespace_blk",
        .release = namespace_blk_release,
};

static bool is_namespace_pmem(struct device *dev)
{
        return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(struct device *dev)
{
        return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(struct device *dev)
{
        return dev ? dev->type == &namespace_io_device_type : false;
}

static int is_uuid_busy(struct device *dev, void *data)
{
        u8 *uuid1 = data, *uuid2 = NULL;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid2 = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid2 = nsblk->uuid;
        } else if (is_nd_btt(dev)) {
                struct nd_btt *nd_btt = to_nd_btt(dev);

                uuid2 = nd_btt->uuid;
        } else if (is_nd_pfn(dev)) {
                struct nd_pfn *nd_pfn = to_nd_pfn(dev);

                uuid2 = nd_pfn->uuid;
        }

        if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
                return -EBUSY;

        return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
        if (is_nd_pmem(dev) || is_nd_blk(dev))
                return device_for_each_child(dev, data, is_uuid_busy);
        return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on a nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

        if (!nvdimm_bus)
                return false;
        WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
        if (device_for_each_child(&nvdimm_bus->dev, uuid,
                                is_namespace_uuid_busy) != 0)
                return false;
        return true;
}

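/*
 * pmem_should_map_pages() - should this namespace be mapped via ZONE_DEVICE?
 *
 * True only when ZONE_DEVICE is enabled, the parent region advertises
 * ND_REGION_PAGEMAP, the device is a plain namespace (not a btt or pfn
 * device), the range does not alias System RAM, and the architecture maps
 * pmem write-back.
 */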
bool pmem_should_map_pages(struct device *dev)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_namespace_io *nsio;

        if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
                return false;

        if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
                return false;

        if (is_nd_pfn(dev) || is_nd_btt(dev))
                return false;

        nsio = to_nd_namespace_io(dev);
        if (region_intersects(nsio->res.start, resource_size(&nsio->res),
                                IORESOURCE_SYSTEM_RAM,
                                IORES_DESC_NONE) == REGION_MIXED)
                return false;

#ifdef ARCH_MEMREMAP_PMEM
        return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
#else
        return false;
#endif
}
EXPORT_SYMBOL(pmem_should_map_pages);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
                char *name)
{
        struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
        const char *suffix = NULL;

        if (ndns->claim && is_nd_btt(ndns->claim))
                suffix = "s";

        if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
                sprintf(name, "pmem%d%s", nd_region->id, suffix ? suffix : "");
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
                                suffix ? suffix : "");
        } else {
                return NULL;
        }

        return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
        static const u8 null_uuid[16];

        if (!dev)
                return null_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                return nsblk->uuid;
        } else
                return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);

        return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t __alt_name_store(struct device *dev, const char *buf,
                const size_t len)
{
        char *input, *pos, *alt_name, **ns_altname;
        ssize_t rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = &nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = &nsblk->alt_name;
        } else
                return -ENXIO;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        input = kmemdup(buf, len + 1, GFP_KERNEL);
        if (!input)
                return -ENOMEM;

        input[len] = '\0';
        pos = strim(input);
        if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
                rc = -EINVAL;
                goto out;
        }

        alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
        if (!alt_name) {
                rc = -ENOMEM;
                goto out;
        }
        kfree(*ns_altname);
        *ns_altname = alt_name;
        sprintf(*ns_altname, "%s", pos);
        rc = len;

out:
        kfree(input);
        return rc;
}

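/*
 * nd_namespace_blk_size() - sum the DPA currently allocated to this BLK
 * namespace's label_id on its single backing dimm (mapping[0]).
 */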
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        resource_size_t size = 0;
        struct resource *res;

        if (!nsblk->uuid)
                return 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res)
                if (strcmp(res->name, label_id.id) == 0)
                        size += resource_size(res);
        return size;
}

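/*
 * __nd_namespace_blk_validate() - check that the in-memory resource list
 * for this BLK namespace is consistent with what the labels describe: no
 * unacknowledged adjustments, the expected number of extents, and no stale
 * resource pointers.  Called with the nvdimm_bus lock held.
 */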
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
        struct nd_mapping *nd_mapping = &nd_region->mapping[0];
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct nd_label_id label_id;
        struct resource *res;
        int count, i;

        if (!nsblk->uuid || !nsblk->lbasize || !ndd)
                return false;

        count = 0;
        nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
        for_each_dpa_resource(ndd, res) {
                if (strcmp(res->name, label_id.id) != 0)
                        continue;
                /*
                 * Resources with unacknowledged adjustments indicate a
                 * failure to update labels
                 */
                if (res->flags & DPA_RESOURCE_ADJUSTED)
                        return false;
                count++;
        }

        /* These values match after a successful label update */
        if (count != nsblk->num_resources)
                return false;

        for (i = 0; i < nsblk->num_resources; i++) {
                struct resource *found = NULL;

                for_each_dpa_resource(ndd, res)
                        if (res == nsblk->res[i]) {
                                found = res;
                                break;
                        }
                /* stale resource */
                if (!found)
                        return false;
        }

        return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
        resource_size_t size;

        nvdimm_bus_lock(&nsblk->common.dev);
        size = __nd_namespace_blk_validate(nsblk);
        nvdimm_bus_unlock(&nsblk->common.dev);

        return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);


static int nd_namespace_label_update(struct nd_region *nd_region,
                struct device *dev)
{
        dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
                        "namespace must be idle during label update\n");
        if (dev->driver || to_ndns(dev)->claim)
                return 0;

        /*
         * Only allow label writes that will result in a valid namespace
         * or deletion of an existing namespace.
         */
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
                resource_size_t size = resource_size(&nspm->nsio.res);

                if (size == 0 && nspm->uuid)
                        /* delete allocation */;
                else if (!nspm->uuid)
                        return 0;

                return nd_pmem_namespace_label_update(nd_region, nspm, size);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
                resource_size_t size = nd_namespace_blk_size(nsblk);

                if (size == 0 && nsblk->uuid)
                        /* delete allocation */;
                else if (!nsblk->uuid || !nsblk->lbasize)
                        return 0;

                return nd_blk_namespace_label_update(nd_region, nsblk, size);
        } else
                return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __alt_name_store(dev, buf, len);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: %s(%zd)\n", __func__, rc < 0 ? "fail " : "", rc);
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        char *ns_altname;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_altname = nspm->alt_name;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_altname = nsblk->alt_name;
        } else
                return -ENXIO;

        return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

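/*
 * scan_free() - release @n bytes of DPA from the tail of @label_id's
 * allocation on one dimm.  Whole trailing resources are deleted; a final
 * partial resource is shrunk with adjust_resource() (BLK allocations keep
 * their high-DPA end, PMEM allocations keep their low-DPA start).
 */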
static int scan_free(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        int rc = 0;

        while (n) {
                struct resource *res, *last;
                resource_size_t new_start;

                last = NULL;
                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id->id) == 0)
                                last = res;
                res = last;
                if (!res)
                        return 0;

                if (n >= resource_size(res)) {
                        n -= resource_size(res);
                        nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
                        nvdimm_free_dpa(ndd, res);
                        /* retry with last resource deleted */
                        continue;
                }

                /*
                 * Keep BLK allocations relegated to high DPA as much as
                 * possible
                 */
                if (is_blk)
                        new_start = res->start + n;
                else
                        new_start = res->start;

                rc = adjust_resource(res, new_start, resource_size(res) - n);
                if (rc == 0)
                        res->flags |= DPA_RESOURCE_ADJUSTED;
                nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
                break;
        }

        return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered.  Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                int rc;

                rc = scan_free(nd_region, nd_mapping, label_id, n);
                if (rc)
                        return rc;
        }

        return 0;
}

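/*
 * init_dpa_allocation() - make the first DPA reservation for @label_id on
 * this dimm: BLK starts from the top of the mapping, PMEM from the bottom.
 * Returns the number of bytes still unallocated (0 on success, @n on
 * failure).
 */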
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
                struct nd_region *nd_region, struct nd_mapping *nd_mapping,
                resource_size_t n)
{
        bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        resource_size_t first_dpa;
        struct resource *res;
        int rc = 0;

        /* allocate blk from highest dpa first */
        if (is_blk)
                first_dpa = nd_mapping->start + nd_mapping->size - n;
        else
                first_dpa = nd_mapping->start;

        /* first resource allocation for this label-id or dimm */
        res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
        if (!res)
                rc = -EBUSY;

        nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
        return rc ? n : 0;
}

static bool space_valid(bool is_pmem, bool is_reserve,
                struct nd_label_id *label_id, struct resource *res)
{
        /*
         * For BLK-space any space is valid, for PMEM-space, it must be
         * contiguous with an existing allocation unless we are
         * reserving pmem.
         */
        if (is_reserve || !is_pmem)
                return true;
        if (!res || strcmp(res->name, label_id->id) == 0)
                return true;
        return false;
}

enum alloc_loc {
        ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

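/*
 * scan_allocate() - walk the free DPA space in @nd_mapping and carve out up
 * to @n bytes for @label_id.  Free space before, between, and after existing
 * resources is considered, and existing allocations for the same label_id
 * are grown in place where possible.  Returns the number of bytes that
 * could not be allocated.
 */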
static resource_size_t scan_allocate(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
                resource_size_t n)
{
        resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
        bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        const resource_size_t to_allocate = n;
        struct resource *res;
        int first;

 retry:
        first = 0;
        for_each_dpa_resource(ndd, res) {
                resource_size_t allocate, available = 0, free_start, free_end;
                struct resource *next = res->sibling, *new_res = NULL;
                enum alloc_loc loc = ALLOC_ERR;
                const char *action;
                int rc = 0;

                /* ignore resources outside this nd_mapping */
                if (res->start > mapping_end)
                        continue;
                if (res->end < nd_mapping->start)
                        continue;

                /* space at the beginning of the mapping */
                if (!first++ && res->start > nd_mapping->start) {
                        free_start = nd_mapping->start;
                        available = res->start - free_start;
                        if (space_valid(is_pmem, is_reserve, label_id, NULL))
                                loc = ALLOC_BEFORE;
                }

                /* space between allocations */
                if (!loc && next) {
                        free_start = res->start + resource_size(res);
                        free_end = min(mapping_end, next->start - 1);
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_MID;
                        }
                }

                /* space at the end of the mapping */
                if (!loc && !next) {
                        free_start = res->start + resource_size(res);
                        free_end = mapping_end;
                        if (space_valid(is_pmem, is_reserve, label_id, res)
                                        && free_start < free_end) {
                                available = free_end + 1 - free_start;
                                loc = ALLOC_AFTER;
                        }
                }

                if (!loc || !available)
                        continue;
                allocate = min(available, n);
                switch (loc) {
                case ALLOC_BEFORE:
                        if (strcmp(res->name, label_id->id) == 0) {
                                /* adjust current resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(res, res->start - allocate,
                                                resource_size(res) + allocate);
                                action = "cur grow up";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_MID:
                        if (strcmp(next->name, label_id->id) == 0) {
                                /* adjust next resource up */
                                if (is_pmem && !is_reserve)
                                        return n;
                                rc = adjust_resource(next, next->start
                                                - allocate, resource_size(next)
                                                + allocate);
                                new_res = next;
                                action = "next grow up";
                        } else if (strcmp(res->name, label_id->id) == 0) {
                                action = "grow down";
                        } else
                                action = "allocate";
                        break;
                case ALLOC_AFTER:
                        if (strcmp(res->name, label_id->id) == 0)
                                action = "grow down";
                        else
                                action = "allocate";
                        break;
                default:
                        return n;
                }

                if (strcmp(action, "allocate") == 0) {
                        /* BLK allocate bottom up */
                        if (!is_pmem)
                                free_start += available - allocate;
                        else if (!is_reserve && free_start != nd_mapping->start)
                                return n;

                        new_res = nvdimm_allocate_dpa(ndd, label_id,
                                        free_start, allocate);
                        if (!new_res)
                                rc = -EBUSY;
                } else if (strcmp(action, "grow down") == 0) {
                        /* adjust current resource down */
                        rc = adjust_resource(res, res->start, resource_size(res)
                                        + allocate);
                        if (rc == 0)
                                res->flags |= DPA_RESOURCE_ADJUSTED;
                }

                if (!new_res)
                        new_res = res;

                nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
                                action, loc, rc);

                if (rc)
                        return n;

                n -= allocate;
                if (n) {
                        /*
                         * Retry scan with newly inserted resources.
                         * For example, if we did an ALLOC_BEFORE
                         * insertion there may also have been space
                         * available for an ALLOC_AFTER insertion, so we
                         * need to check this same resource again
                         */
                        goto retry;
                } else
                        return 0;
        }

        /*
         * If we allocated nothing in the BLK case it may be because we are in
         * an initial "pmem-reserve pass".  Only do an initial BLK allocation
         * when none of the DPA space is reserved.
         */
        if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
                return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
        return n;
}

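/*
 * merge_dpa() - coalesce adjacent BLK resources that carry the same
 * label_id into a single resource (PMEM allocations are left alone since
 * they are kept contiguous by construction).
 */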
static int merge_dpa(struct nd_region *nd_region,
                struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res;

        if (strncmp("pmem", label_id->id, 4) == 0)
                return 0;
 retry:
        for_each_dpa_resource(ndd, res) {
                int rc;
                struct resource *next = res->sibling;
                resource_size_t end = res->start + resource_size(res);

                if (!next || strcmp(res->name, label_id->id) != 0
                                || strcmp(next->name, label_id->id) != 0
                                || end != next->start)
                        continue;
                end += resource_size(next);
                nvdimm_free_dpa(ndd, next);
                rc = adjust_resource(res, res->start, end - res->start);
                nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
                if (rc)
                        return rc;
                res->flags |= DPA_RESOURCE_ADJUSTED;
                goto retry;
        }

        return 0;
}

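/*
 * __reserve_free_pmem() - during a BLK-only allocation pass, temporarily
 * claim all free PMEM-capable DPA on @nvdimm under the "pmem-reserve" label
 * so that a subsequent scan_allocate() can only hand out BLK-only space.
 * Paired with release_free_pmem().
 */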
static int __reserve_free_pmem(struct device *dev, void *data)
{
        struct nvdimm *nvdimm = data;
        struct nd_region *nd_region;
        struct nd_label_id label_id;
        int i;

        if (!is_nd_pmem(dev))
                return 0;

        nd_region = to_nd_region(dev);
        if (nd_region->ndr_mappings == 0)
                return 0;

        memset(&label_id, 0, sizeof(label_id));
        strcat(label_id.id, "pmem-reserve");
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t n, rem = 0;

                if (nd_mapping->nvdimm != nvdimm)
                        continue;

                n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
                if (n == 0)
                        return 0;
                rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
                dev_WARN_ONCE(&nd_region->dev, rem,
                                "pmem reserve underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                return rem ? -ENXIO : 0;
        }

        return 0;
}

static void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
        struct resource *res, *_res;

        for_each_dpa_resource_safe(ndd, res, _res)
                if (strcmp(res->name, "pmem-reserve") == 0)
                        nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
                struct nd_mapping *nd_mapping)
{
        struct nvdimm *nvdimm = nd_mapping->nvdimm;
        int rc;

        rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
                        __reserve_free_pmem);
        if (rc)
                release_free_pmem(nvdimm_bus, nd_mapping);
        return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered.  For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA.  For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
                struct nd_label_id *label_id, resource_size_t n)
{
        struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
        bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
        int i;

        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                resource_size_t rem = n;
                int rc, j;

                /*
                 * In the BLK case try once with all unallocated PMEM
                 * reserved, and once without
                 */
                for (j = is_pmem; j < 2; j++) {
                        bool blk_only = j == 0;

                        if (blk_only) {
                                rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
                                if (rc)
                                        return rc;
                        }
                        rem = scan_allocate(nd_region, nd_mapping,
                                        label_id, rem);
                        if (blk_only)
                                release_free_pmem(nvdimm_bus, nd_mapping);

                        /* try again and allow encroachments into PMEM */
                        if (rem == 0)
                                break;
                }

                dev_WARN_ONCE(&nd_region->dev, rem,
                                "allocation underrun: %#llx of %#llx bytes\n",
                                (unsigned long long) n - rem,
                                (unsigned long long) n);
                if (rem)
                        return -ENXIO;

                rc = merge_dpa(nd_region, nd_mapping, label_id);
                if (rc)
                        return rc;
        }

        return 0;
}

static void nd_namespace_pmem_set_size(struct nd_region *nd_region,
                struct nd_namespace_pmem *nspm, resource_size_t size)
{
        struct resource *res = &nspm->nsio.res;

        res->start = nd_region->ndr_start;
        res->end = nd_region->ndr_start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
        if (!uuid) {
                dev_dbg(dev, "%s: uuid not set\n", where);
                return true;
        }
        return false;
}

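/*
 * __size_store() - resize a namespace to @val bytes.  The request must be a
 * multiple of SZ_4K * ndr_mappings; the per-dimm share is then grown or
 * shrunk to match, and an emptied, unclaimed, non-seed BLK namespace is
 * unregistered.  Called with the device and nvdimm_bus locks held.
 */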
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
        resource_size_t allocated = 0, available = 0;
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_mapping *nd_mapping;
        struct nvdimm_drvdata *ndd;
        struct nd_label_id label_id;
        u32 flags = 0, remainder;
        u8 *uuid = NULL;
        int rc, i;

        if (dev->driver || to_ndns(dev)->claim)
                return -EBUSY;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        /*
         * We need a uuid for the allocation-label and dimm(s) on which
         * to store the label.
         */
        if (uuid_not_set(uuid, dev, __func__))
                return -ENXIO;
        if (nd_region->ndr_mappings == 0) {
                dev_dbg(dev, "%s: not associated with dimm(s)\n", __func__);
                return -ENXIO;
        }

        div_u64_rem(val, SZ_4K * nd_region->ndr_mappings, &remainder);
        if (remainder) {
                dev_dbg(dev, "%llu is not %dK aligned\n", val,
                                (SZ_4K * nd_region->ndr_mappings) / SZ_1K);
                return -EINVAL;
        }

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                nd_mapping = &nd_region->mapping[i];
                ndd = to_ndd(nd_mapping);

                /*
                 * All dimms in an interleave set, or the base dimm for a blk
                 * region, need to be enabled for the size to be changed.
                 */
                if (!ndd)
                        return -ENXIO;

                allocated += nvdimm_allocated_dpa(ndd, &label_id);
        }
        available = nd_region_available_dpa(nd_region);

        if (val > available + allocated)
                return -ENOSPC;

        if (val == allocated)
                return 0;

        val = div_u64(val, nd_region->ndr_mappings);
        allocated = div_u64(allocated, nd_region->ndr_mappings);
        if (val < allocated)
                rc = shrink_dpa_allocation(nd_region, &label_id,
                                allocated - val);
        else
                rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

        if (rc)
                return rc;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                nd_namespace_pmem_set_size(nd_region, nspm,
                                val * nd_region->ndr_mappings);
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                /*
                 * Try to delete the namespace if we deleted all of its
                 * allocation, this is not the seed device for the
                 * region, and it is not actively claimed by a btt
                 * instance.
                 */
                if (val == 0 && nd_region->ns_seed != dev
                                && !nsblk->common.claim)
                        nd_device_unregister(dev, ND_ASYNC);
        }

        return rc;
}

static ssize_t size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        unsigned long long val;
        u8 **uuid = NULL;
        int rc;

        rc = kstrtoull(buf, 0, &val);
        if (rc)
                return rc;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        rc = __size_store(dev, val);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = &nsblk->uuid;
        }

        if (rc == 0 && val == 0 && uuid) {
                /* setting size zero == 'delete namespace' */
                kfree(*uuid);
                *uuid = NULL;
        }

        dev_dbg(dev, "%s: %llx %s (%d)\n", __func__, val, rc < 0
                        ? "fail" : "success", rc);

        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        struct device *dev = &ndns->dev;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                return resource_size(&nspm->nsio.res);
        } else if (is_namespace_blk(dev)) {
                return nd_namespace_blk_size(to_nd_namespace_blk(dev));
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                return resource_size(&nsio->res);
        } else
                WARN_ONCE(1, "unknown namespace type\n");
        return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
        resource_size_t size;

        nvdimm_bus_lock(&ndns->dev);
        size = __nvdimm_namespace_capacity(ndns);
        nvdimm_bus_unlock(&ndns->dev);

        return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

static ssize_t size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%llu\n", (unsigned long long)
                        nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, S_IRUGO, size_show, size_store);

static ssize_t uuid_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        u8 *uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
        } else
                return -ENXIO;

        if (uuid)
                return sprintf(buf, "%pUb\n", uuid);
        return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
                struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
        u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
        struct nd_label_id old_label_id;
        struct nd_label_id new_label_id;
        int i;

        if (!nd_is_uuid_unique(dev, new_uuid))
                return -EINVAL;

        if (*old_uuid == NULL)
                goto out;

        /*
         * If we've already written a label with this uuid, then it's
         * too late to rename because we can't reliably update the uuid
         * without losing the old namespace.  Userspace must delete this
         * namespace to abandon the old uuid.
         */
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];

                /*
                 * This check by itself is sufficient because old_uuid
                 * would be NULL above if this uuid did not exist in the
                 * currently written set.
                 *
                 * FIXME: can we delete uuid with zero dpa allocated?
                 */
                if (nd_mapping->labels)
                        return -EBUSY;
        }

        nd_label_gen_id(&old_label_id, *old_uuid, flags);
        nd_label_gen_id(&new_label_id, new_uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, old_label_id.id) == 0)
                                sprintf((void *) res->name, "%s",
                                                new_label_id.id);
        }
        kfree(*old_uuid);
 out:
        *old_uuid = new_uuid;
        return 0;
}

static ssize_t uuid_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        u8 *uuid = NULL;
        ssize_t rc = 0;
        u8 **ns_uuid;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                ns_uuid = &nspm->uuid;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                ns_uuid = &nsblk->uuid;
        } else
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        wait_nvdimm_bus_probe_idle(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_uuid_store(dev, &uuid, buf, len);
        if (rc >= 0)
                rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        else
                kfree(uuid);
        dev_dbg(dev, "%s: result: %zd wrote: %s%s", __func__,
                        rc, buf, buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct resource *res;

        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                res = &nspm->nsio.res;
        } else if (is_namespace_io(dev)) {
                struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

                res = &nsio->res;
        } else
                return -ENXIO;

        /* no address to convey if the namespace has no allocation */
        if (resource_size(res) == 0)
                return -ENXIO;
        return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_RO(resource);

static const unsigned long ns_lbasize_supported[] = { 512, 520, 528,
        4096, 4104, 4160, 4224, 0 };

static ssize_t sector_size_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

        if (!is_namespace_blk(dev))
                return -ENXIO;

        return nd_sector_size_show(nsblk->lbasize, ns_lbasize_supported, buf);
}

static ssize_t sector_size_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
        struct nd_region *nd_region = to_nd_region(dev->parent);
        ssize_t rc = 0;

        if (!is_namespace_blk(dev))
                return -ENXIO;

        device_lock(dev);
        nvdimm_bus_lock(dev);
        if (to_ndns(dev)->claim)
                rc = -EBUSY;
        if (rc >= 0)
                rc = nd_sector_size_store(dev, buf, &nsblk->lbasize,
                                ns_lbasize_supported);
        if (rc >= 0)
                rc = nd_namespace_label_update(nd_region, dev);
        dev_dbg(dev, "%s: result: %zd %s: %s%s", __func__,
                        rc, rc < 0 ? "tried" : "wrote", buf,
                        buf[len - 1] == '\n' ? "" : "\n");
        nvdimm_bus_unlock(dev);
        device_unlock(dev);

        return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_region *nd_region = to_nd_region(dev->parent);
        struct nd_label_id label_id;
        int count = 0, i;
        u8 *uuid = NULL;
        u32 flags = 0;

        nvdimm_bus_lock(dev);
        if (is_namespace_pmem(dev)) {
                struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

                uuid = nspm->uuid;
                flags = 0;
        } else if (is_namespace_blk(dev)) {
                struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

                uuid = nsblk->uuid;
                flags = NSLABEL_FLAG_LOCAL;
        }

        if (!uuid)
                goto out;

        nd_label_gen_id(&label_id, uuid, flags);
        for (i = 0; i < nd_region->ndr_mappings; i++) {
                struct nd_mapping *nd_mapping = &nd_region->mapping[i];
                struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
                struct resource *res;

                for_each_dpa_resource(ndd, res)
                        if (strcmp(res->name, label_id.id) == 0)
                                count++;
        }
 out:
        nvdimm_bus_unlock(dev);

        return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

static ssize_t holder_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        ssize_t rc;

        device_lock(dev);
        rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(holder);

static ssize_t mode_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        struct nd_namespace_common *ndns = to_ndns(dev);
        struct device *claim;
        char *mode;
        ssize_t rc;

        device_lock(dev);
        claim = ndns->claim;
        if (claim && is_nd_btt(claim))
                mode = "safe";
        else if (claim && is_nd_pfn(claim))
                mode = "memory";
        else if (claim && is_nd_dax(claim))
                mode = "dax";
        else if (!claim && pmem_should_map_pages(dev))
                mode = "memory";
        else
                mode = "raw";
        rc = sprintf(buf, "%s\n", mode);
        device_unlock(dev);

        return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
                struct device_attribute *attr, const char *buf, size_t len)
{
        bool force_raw;
        int rc = strtobool(buf, &force_raw);

        if (rc)
                return rc;

        to_ndns(dev)->force_raw = force_raw;
        return len;
}

static ssize_t force_raw_show(struct device *dev,
                struct device_attribute *attr, char *buf)
{
        return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
        &dev_attr_nstype.attr,
        &dev_attr_size.attr,
        &dev_attr_mode.attr,
        &dev_attr_uuid.attr,
        &dev_attr_holder.attr,
        &dev_attr_resource.attr,
        &dev_attr_alt_name.attr,
        &dev_attr_force_raw.attr,
        &dev_attr_sector_size.attr,
        &dev_attr_dpa_extents.attr,
        NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
                struct attribute *a, int n)
{
        struct device *dev = container_of(kobj, struct device, kobj);

        if (a == &dev_attr_resource.attr) {
                if (is_namespace_blk(dev))
                        return 0;
                return a->mode;
        }

        if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
                if (a == &dev_attr_size.attr)
                        return S_IWUSR | S_IRUGO;

                if (is_namespace_pmem(dev) && a == &dev_attr_sector_size.attr)
                        return 0;

                return a->mode;
        }

        if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
                        || a == &dev_attr_holder.attr
                        || a == &dev_attr_force_raw.attr
                        || a == &dev_attr_mode.attr)
                return a->mode;

        return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
        .attrs = nd_namespace_attributes,
        .is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
        &nd_device_attribute_group,
        &nd_namespace_attribute_group,
        &nd_numa_attribute_group,
        NULL,
};

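/*
 * nvdimm_namespace_common_probe() - given a namespace, btt, pfn, or dax
 * device, resolve and sanity check the backing namespace: it must be idle,
 * unclaimed (or claimed by @dev), at least ND_MIN_NAMESPACE_SIZE, and, for
 * labelled namespaces, fully configured (uuid, sector size, validated BLK
 * extents).
 */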
struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
        struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
        struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
        struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
        struct nd_namespace_common *ndns = NULL;
        resource_size_t size;

        if (nd_btt || nd_pfn || nd_dax) {
                if (nd_btt)
                        ndns = nd_btt->ndns;
                else if (nd_pfn)
                        ndns = nd_pfn->ndns;
                else if (nd_dax)
                        ndns = nd_dax->nd_pfn.ndns;

                if (!ndns)
                        return ERR_PTR(-ENODEV);

                /*
                 * Flush any in-progress probes / removals in the driver
                 * for the raw personality of this namespace.
                 */
                device_lock(&ndns->dev);
                device_unlock(&ndns->dev);
                if (ndns->dev.driver) {
                        dev_dbg(&ndns->dev, "is active, can't bind %s\n",
                                        dev_name(dev));
                        return ERR_PTR(-EBUSY);
                }
                if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
                                        "host (%s) vs claim (%s) mismatch\n",
                                        dev_name(dev),
                                        dev_name(ndns->claim)))
                        return ERR_PTR(-ENXIO);
        } else {
                ndns = to_ndns(dev);
                if (ndns->claim) {
                        dev_dbg(dev, "claimed by %s, failing probe\n",
                                dev_name(ndns->claim));

                        return ERR_PTR(-ENXIO);
                }
        }

        size = nvdimm_namespace_capacity(ndns);
        if (size < ND_MIN_NAMESPACE_SIZE) {
                dev_dbg(&ndns->dev, "%pa, too small must be at least %#x\n",
                                &size, ND_MIN_NAMESPACE_SIZE);
                return ERR_PTR(-ENODEV);
        }

        if (is_namespace_pmem(&ndns->dev)) {
                struct nd_namespace_pmem *nspm;

                nspm = to_nd_namespace_pmem(&ndns->dev);
                if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
        } else if (is_namespace_blk(&ndns->dev)) {
                struct nd_namespace_blk *nsblk;

                nsblk = to_nd_namespace_blk(&ndns->dev);
                if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
                        return ERR_PTR(-ENODEV);
                if (!nsblk->lbasize) {
                        dev_dbg(&ndns->dev, "%s: sector size not set\n",
                                __func__);
                        return ERR_PTR(-ENODEV);
                }
                if (!nd_namespace_blk_validate(nsblk))
                        return ERR_PTR(-ENODEV);
        }

        return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

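/*
 * create_namespace_io() - for label-less regions, register a single
 * nd_namespace_io device that spans the entire region.  Returns a
 * NULL-terminated array holding that one device.
 */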
static struct device **create_namespace_io(struct nd_region *nd_region)
{
        struct nd_namespace_io *nsio;
        struct device *dev, **devs;
        struct resource *res;

        nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
        if (!nsio)
                return NULL;

        devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
        if (!devs) {
                kfree(nsio);
                return NULL;
        }

        dev = &nsio->common.dev;
        dev->type = &namespace_io_device_type;
        dev->parent = &nd_region->dev;
        res = &nsio->res;
        res->name = dev_name(&nd_region->dev);
        res->flags = IORESOURCE_MEM;
        res->start = nd_region->ndr_start;
        res->end = res->start + nd_region->ndr_size - 1;

        devs[0] = dev;
        return devs;
}

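/*
 * has_uuid_at_pos() - return true if a label for @uuid exists in the
 * interleave set with a matching @cookie, an nlabel equal to the mapping
 * count, and a position field equal to @pos.  Duplicate @uuid labels on a
 * single dimm disqualify the set.
 */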
1486 static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
1487                 u64 cookie, u16 pos)
1488 {
1489         struct nd_namespace_label *found = NULL;
1490         int i;
1491
1492         for (i = 0; i < nd_region->ndr_mappings; i++) {
1493                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1494                 struct nd_namespace_label *nd_label;
1495                 bool found_uuid = false;
1496                 int l;
1497
1498                 for_each_label(l, nd_label, nd_mapping->labels) {
1499                         u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1500                         u16 position = __le16_to_cpu(nd_label->position);
1501                         u16 nlabel = __le16_to_cpu(nd_label->nlabel);
1502
1503                         if (isetcookie != cookie)
1504                                 continue;
1505
1506                         if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
1507                                 continue;
1508
1509                         if (found_uuid) {
1510                                 dev_dbg(to_ndd(nd_mapping)->dev,
1511                                                 "%s duplicate entry for uuid\n",
1512                                                 __func__);
1513                                 return false;
1514                         }
1515                         found_uuid = true;
1516                         if (nlabel != nd_region->ndr_mappings)
1517                                 continue;
1518                         if (position != pos)
1519                                 continue;
1520                         found = nd_label;
1521                         break;
1522                 }
1523                 if (found)
1524                         break;
1525         }
1526         return found != NULL;
1527 }
1528
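/*
 * select_pmem_id() - for each mapping in @nd_region find the label matching
 * @pmem_id, verify that its dpa range fits inside the range the mapping
 * advertises, and pin it as labels[0] (NULL-terminated) for later
 * consumption by find_pmem_label_set().
 */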
1529 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1530 {
1531         struct nd_namespace_label *select = NULL;
1532         int i;
1533
1534         if (!pmem_id)
1535                 return -ENODEV;
1536
1537         for (i = 0; i < nd_region->ndr_mappings; i++) {
1538                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1539                 struct nd_namespace_label *nd_label;
1540                 u64 hw_start, hw_end, pmem_start, pmem_end;
1541                 int l;
1542
1543                 for_each_label(l, nd_label, nd_mapping->labels)
1544                         if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1545                                 break;
1546
1547                 if (!nd_label) {
1548                         WARN_ON(1);
1549                         return -EINVAL;
1550                 }
1551
1552                 select = nd_label;
1553                 /*
1554                  * Check that this label is compliant with the dpa
1555                  * range published in NFIT
1556                  */
1557                 hw_start = nd_mapping->start;
1558                 hw_end = hw_start + nd_mapping->size;
1559                 pmem_start = __le64_to_cpu(select->dpa);
1560                 pmem_end = pmem_start + __le64_to_cpu(select->rawsize);
1561                 if (pmem_start == hw_start && pmem_end <= hw_end)
1562                         /* pass */;
1563                 else
1564                         return -EINVAL;
1565
1566                 nd_mapping->labels[0] = select;
1567                 nd_mapping->labels[1] = NULL;
1568         }
1569         return 0;
1570 }
1571
1572 /**
1573  * find_pmem_label_set - validate interleave set labelling, retrieve label0
1574  * @nd_region: region with mappings to validate
 * @nspm: namespace to populate (alt_name, uuid, size) from label0
1575  */
1576 static int find_pmem_label_set(struct nd_region *nd_region,
1577                 struct nd_namespace_pmem *nspm)
1578 {
1579         u64 cookie = nd_region_interleave_set_cookie(nd_region);
1580         struct nd_namespace_label *nd_label;
1581         u8 select_id[NSLABEL_UUID_LEN];
1582         resource_size_t size = 0;
1583         u8 *pmem_id = NULL;
1584         int rc = -ENODEV, l;
1585         u16 i;
1586
1587         if (cookie == 0)
1588                 return -ENXIO;
1589
1590         /*
1591          * Find a complete set of labels by uuid.  By definition we can start
1592          * with any mapping as the reference label
1593          */
1594         for_each_label(l, nd_label, nd_region->mapping[0].labels) {
1595                 u64 isetcookie = __le64_to_cpu(nd_label->isetcookie);
1596
1597                 if (isetcookie != cookie)
1598                         continue;
1599
1600                 for (i = 0; i < nd_region->ndr_mappings; i++)
1601                         if (!has_uuid_at_pos(nd_region, nd_label->uuid,
1602                                                 cookie, i))
1603                                 break;
1604                 if (i < nd_region->ndr_mappings) {
1605                         /*
1606                          * Give up if we don't find an instance of a
1607                          * uuid at each position (from 0 to
1608                          * nd_region->ndr_mappings - 1), or if we find a
1609                          * dimm with two instances of the same uuid.
1610                          */
1611                         rc = -EINVAL;
1612                         goto err;
1613                 } else if (pmem_id) {
1614                         /*
1615                          * If there is more than one valid uuid set, we
1616                          * need userspace to clean this up.
1617                          */
1618                         rc = -EBUSY;
1619                         goto err;
1620                 }
1621                 memcpy(select_id, nd_label->uuid, NSLABEL_UUID_LEN);
1622                 pmem_id = select_id;
1623         }
1624
1625         /*
1626          * Fix up each mapping's 'labels' to have the validated pmem label for
1627          * that position at labels[0], and NULL at labels[1].  In the process,
1628          * check that the namespace aligns with interleave-set.  We know
1629          * that it does not overlap with any blk namespaces by virtue of
1630          * the dimm being enabled (i.e. nd_label_reserve_dpa()
1631          * succeeded).
1632          */
1633         rc = select_pmem_id(nd_region, pmem_id);
1634         if (rc)
1635                 goto err;
1636
1637         /* Calculate total size and populate namespace properties from label0 */
1638         for (i = 0; i < nd_region->ndr_mappings; i++) {
1639                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1640                 struct nd_namespace_label *label0 = nd_mapping->labels[0];
1641
1642                 size += __le64_to_cpu(label0->rawsize);
1643                 if (__le16_to_cpu(label0->position) != 0)
1644                         continue;
1645                 WARN_ON(nspm->alt_name || nspm->uuid);
1646                 nspm->alt_name = kmemdup((void __force *) label0->name,
1647                                 NSLABEL_NAME_LEN, GFP_KERNEL);
1648                 nspm->uuid = kmemdup((void __force *) label0->uuid,
1649                                 NSLABEL_UUID_LEN, GFP_KERNEL);
1650         }
1651
1652         if (!nspm->alt_name || !nspm->uuid) {
1653                 rc = -ENOMEM;
1654                 goto err;
1655         }
1656
1657         nd_namespace_pmem_set_size(nd_region, nspm, size);
1658
1659         return 0;
1660  err:
1661         switch (rc) {
1662         case -EINVAL:
1663                 dev_dbg(&nd_region->dev, "%s: invalid label(s)\n", __func__);
1664                 break;
1665         case -ENODEV:
1666                 dev_dbg(&nd_region->dev, "%s: label not found\n", __func__);
1667                 break;
1668         default:
1669                 dev_dbg(&nd_region->dev, "%s: unexpected err: %d\n",
1670                                 __func__, rc);
1671                 break;
1672         }
1673         return rc;
1674 }
1675
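/*
 * create_namespace_pmem() - allocate the region's pmem namespace device and
 * populate it from a validated label set.  When no labels are found the
 * namespace is published with zero size so userspace can configure it; any
 * other failure releases the partially initialized device.
 */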
1676 static struct device **create_namespace_pmem(struct nd_region *nd_region)
1677 {
1678         struct nd_namespace_pmem *nspm;
1679         struct device *dev, **devs;
1680         struct resource *res;
1681         int rc;
1682
1683         nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
1684         if (!nspm)
1685                 return NULL;
1686
1687         dev = &nspm->nsio.common.dev;
1688         dev->type = &namespace_pmem_device_type;
1689         dev->parent = &nd_region->dev;
1690         res = &nspm->nsio.res;
1691         res->name = dev_name(&nd_region->dev);
1692         res->flags = IORESOURCE_MEM;
1693         rc = find_pmem_label_set(nd_region, nspm);
1694         if (rc == -ENODEV) {
1695                 int i;
1696
1697                 /* Pass, try to permit namespace creation... */
1698                 for (i = 0; i < nd_region->ndr_mappings; i++) {
1699                         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1700
1701                         kfree(nd_mapping->labels);
1702                         nd_mapping->labels = NULL;
1703                 }
1704
1705                 /* Publish a zero-sized namespace for userspace to configure. */
1706                 nd_namespace_pmem_set_size(nd_region, nspm, 0);
1707
1708                 rc = 0;
1709         } else if (rc)
1710                 goto err;
1711
1712         devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
1713         if (!devs)
1714                 goto err;
1715
1716         devs[0] = dev;
1717         return devs;
1718
1719  err:
1720         namespace_pmem_release(&nspm->nsio.common.dev);
1721         return NULL;
1722 }
1723
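/*
 * nsblk_add_resource() - grow nsblk->res by one slot and point the new slot
 * at the dpa resource in @ndd that carries this namespace's label-id and
 * starts at @start.  Returns NULL if the reallocation fails or no matching
 * dpa resource exists.
 */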
1724 struct resource *nsblk_add_resource(struct nd_region *nd_region,
1725                 struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
1726                 resource_size_t start)
1727 {
1728         struct nd_label_id label_id;
1729         struct resource *res;
1730
1731         nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
1732         res = krealloc(nsblk->res,
1733                         sizeof(void *) * (nsblk->num_resources + 1),
1734                         GFP_KERNEL);
1735         if (!res)
1736                 return NULL;
1737         nsblk->res = (struct resource **) res;
1738         for_each_dpa_resource(ndd, res)
1739                 if (strcmp(res->name, label_id.id) == 0
1740                                 && res->start == start) {
1741                         nsblk->res[nsblk->num_resources++] = res;
1742                         return res;
1743                 }
1744         return NULL;
1745 }
1746
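/*
 * nd_namespace_blk_create() - allocate an empty blk namespace seed device
 * for a blk region: reserve an id from the region's ida, name the device
 * "namespace<region>.<id>", and leave registration to the caller.
 */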
1747 static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
1748 {
1749         struct nd_namespace_blk *nsblk;
1750         struct device *dev;
1751
1752         if (!is_nd_blk(&nd_region->dev))
1753                 return NULL;
1754
1755         nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1756         if (!nsblk)
1757                 return NULL;
1758
1759         dev = &nsblk->common.dev;
1760         dev->type = &namespace_blk_device_type;
1761         nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
1762         if (nsblk->id < 0) {
1763                 kfree(nsblk);
1764                 return NULL;
1765         }
1766         dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
1767         dev->parent = &nd_region->dev;
1768         dev->groups = nd_namespace_attribute_groups;
1769
1770         return &nsblk->common.dev;
1771 }
1772
1773 void nd_region_create_blk_seed(struct nd_region *nd_region)
1774 {
1775         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1776         nd_region->ns_seed = nd_namespace_blk_create(nd_region);
1777         /*
1778          * Seed creation failures are not fatal, provisioning is simply
1779          * disabled until memory becomes available
1780          */
1781         if (!nd_region->ns_seed)
1782                 dev_err(&nd_region->dev, "failed to create blk namespace\n");
1783         else
1784                 nd_device_register(nd_region->ns_seed);
1785 }
1786
1787 void nd_region_create_dax_seed(struct nd_region *nd_region)
1788 {
1789         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1790         nd_region->dax_seed = nd_dax_create(nd_region);
1791         /*
1792          * Seed creation failures are not fatal, provisioning is simply
1793          * disabled until memory becomes available
1794          */
1795         if (!nd_region->dax_seed)
1796                 dev_err(&nd_region->dev, "failed to create dax namespace\n");
1797 }
1798
1799 void nd_region_create_pfn_seed(struct nd_region *nd_region)
1800 {
1801         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1802         nd_region->pfn_seed = nd_pfn_create(nd_region);
1803         /*
1804          * Seed creation failures are not fatal, provisioning is simply
1805          * disabled until memory becomes available
1806          */
1807         if (!nd_region->pfn_seed)
1808                 dev_err(&nd_region->dev, "failed to create pfn namespace\n");
1809 }
1810
1811 void nd_region_create_btt_seed(struct nd_region *nd_region)
1812 {
1813         WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
1814         nd_region->btt_seed = nd_btt_create(nd_region);
1815         /*
1816          * Seed creation failures are not fatal, provisioning is simply
1817          * disabled until memory becomes available
1818          */
1819         if (!nd_region->btt_seed)
1820                 dev_err(&nd_region->dev, "failed to create btt namespace\n");
1821 }
1822
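/*
 * create_namespace_blk() - scan the first mapping's labels for
 * NSLABEL_FLAG_LOCAL entries and instantiate one blk namespace per unique
 * uuid, attaching every matching dpa resource to it.  If no labels are
 * found, a single zero-sized namespace is published for userspace to
 * configure.
 */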
1823 static struct device **create_namespace_blk(struct nd_region *nd_region)
1824 {
1825         struct nd_mapping *nd_mapping = &nd_region->mapping[0];
1826         struct nd_namespace_label *nd_label;
1827         struct device *dev, **devs = NULL;
1828         struct nd_namespace_blk *nsblk;
1829         struct nvdimm_drvdata *ndd;
1830         int i, l, count = 0;
1831         struct resource *res;
1832
1833         if (nd_region->ndr_mappings == 0)
1834                 return NULL;
1835
1836         ndd = to_ndd(nd_mapping);
1837         for_each_label(l, nd_label, nd_mapping->labels) {
1838                 u32 flags = __le32_to_cpu(nd_label->flags);
1839                 char name[NSLABEL_NAME_LEN];
1840                 struct device **__devs;
1841
1842                 if (flags & NSLABEL_FLAG_LOCAL)
1843                         /* pass */;
1844                 else
1845                         continue;
1846
1847                 for (i = 0; i < count; i++) {
1848                         nsblk = to_nd_namespace_blk(devs[i]);
1849                         if (memcmp(nsblk->uuid, nd_label->uuid,
1850                                                 NSLABEL_UUID_LEN) == 0) {
1851                                 res = nsblk_add_resource(nd_region, ndd, nsblk,
1852                                                 __le64_to_cpu(nd_label->dpa));
1853                                 if (!res)
1854                                         goto err;
1855                                 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1856                                         dev_name(&nsblk->common.dev));
1857                                 break;
1858                         }
1859                 }
1860                 if (i < count)
1861                         continue;
1862                 __devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
1863                 if (!__devs)
1864                         goto err;
1865                 memcpy(__devs, devs, sizeof(dev) * count);
1866                 kfree(devs);
1867                 devs = __devs;
1868
1869                 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1870                 if (!nsblk)
1871                         goto err;
1872                 dev = &nsblk->common.dev;
1873                 dev->type = &namespace_blk_device_type;
1874                 dev->parent = &nd_region->dev;
1875                 dev_set_name(dev, "namespace%d.%d", nd_region->id, count);
1876                 devs[count++] = dev;
1877                 nsblk->id = -1;
1878                 nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
1879                 nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
1880                                 GFP_KERNEL);
1881                 if (!nsblk->uuid)
1882                         goto err;
1883                 memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
1884                 if (name[0])
1885                         nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
1886                                         GFP_KERNEL);
1887                 res = nsblk_add_resource(nd_region, ndd, nsblk,
1888                                 __le64_to_cpu(nd_label->dpa));
1889                 if (!res)
1890                         goto err;
1891                 nd_dbg_dpa(nd_region, ndd, res, "%s assign\n",
1892                                 dev_name(&nsblk->common.dev));
1893         }
1894
1895         dev_dbg(&nd_region->dev, "%s: discovered %d blk namespace%s\n",
1896                         __func__, count, count == 1 ? "" : "s");
1897
1898         if (count == 0) {
1899                 /* Publish a zero-sized namespace for userspace to configure. */
1900                 for (i = 0; i < nd_region->ndr_mappings; i++) {
1901                         struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1902
1903                         kfree(nd_mapping->labels);
1904                         nd_mapping->labels = NULL;
1905                 }
1906
1907                 devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
1908                 if (!devs)
1909                         goto err;
1910                 nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
1911                 if (!nsblk)
1912                         goto err;
1913                 dev = &nsblk->common.dev;
1914                 dev->type = &namespace_blk_device_type;
1915                 dev->parent = &nd_region->dev;
1916                 devs[count++] = dev;
1917         }
1918
1919         return devs;
1920
1921 err:
1922         for (i = 0; i < count; i++) {
1923                 nsblk = to_nd_namespace_blk(devs[i]);
1924                 namespace_blk_release(&nsblk->common.dev);
1925         }
1926         kfree(devs);
1927         return NULL;
1928 }
1929
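/*
 * init_active_labels() - for each mapping take references on the dimm and
 * its driver data, then cache the dimm's active labels in
 * nd_mapping->labels (NULL-terminated).  A disabled dimm only fails the
 * region probe when it aliases DPA with blk capacity.
 */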
1930 static int init_active_labels(struct nd_region *nd_region)
1931 {
1932         int i;
1933
1934         for (i = 0; i < nd_region->ndr_mappings; i++) {
1935                 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1936                 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1937                 struct nvdimm *nvdimm = nd_mapping->nvdimm;
1938                 int count, j;
1939
1940                 /*
1941                  * If the dimm is disabled then prevent the region from
1942                  * being activated if it aliases DPA.
1943                  */
1944                 if (!ndd) {
1945                         if ((nvdimm->flags & NDD_ALIASING) == 0)
1946                                 return 0;
1947                         dev_dbg(&nd_region->dev, "%s: is disabled, failing probe\n",
1948                                         dev_name(&nd_mapping->nvdimm->dev));
1949                         return -ENXIO;
1950                 }
1951                 nd_mapping->ndd = ndd;
1952                 atomic_inc(&nvdimm->busy);
1953                 get_ndd(ndd);
1954
1955                 count = nd_label_active_count(ndd);
1956                 dev_dbg(ndd->dev, "%s: %d\n", __func__, count);
1957                 if (!count)
1958                         continue;
1959                 nd_mapping->labels = kcalloc(count + 1, sizeof(void *),
1960                                 GFP_KERNEL);
1961                 if (!nd_mapping->labels)
1962                         return -ENOMEM;
1963                 for (j = 0; j < count; j++) {
1964                         struct nd_namespace_label *label;
1965
1966                         label = nd_label_active(ndd, j);
1967                         nd_mapping->labels[j] = label;
1968                 }
1969         }
1970
1971         return 0;
1972 }
1973
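/**
 * nd_region_register_namespaces - scan labels and register namespace devices
 * @nd_region: region to scan
 * @err: on return, the number of namespace devices that failed to register
 *
 * Returns the number of namespaces successfully registered, or a negative
 * error code if label initialization fails or no namespaces are found.
 */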
1974 int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
1975 {
1976         struct device **devs = NULL;
1977         int i, rc = 0, type;
1978
1979         *err = 0;
1980         nvdimm_bus_lock(&nd_region->dev);
1981         rc = init_active_labels(nd_region);
1982         if (rc) {
1983                 nvdimm_bus_unlock(&nd_region->dev);
1984                 return rc;
1985         }
1986
1987         type = nd_region_to_nstype(nd_region);
1988         switch (type) {
1989         case ND_DEVICE_NAMESPACE_IO:
1990                 devs = create_namespace_io(nd_region);
1991                 break;
1992         case ND_DEVICE_NAMESPACE_PMEM:
1993                 devs = create_namespace_pmem(nd_region);
1994                 break;
1995         case ND_DEVICE_NAMESPACE_BLK:
1996                 devs = create_namespace_blk(nd_region);
1997                 break;
1998         default:
1999                 break;
2000         }
2001         nvdimm_bus_unlock(&nd_region->dev);
2002
2003         if (!devs)
2004                 return -ENODEV;
2005
2006         for (i = 0; devs[i]; i++) {
2007                 struct device *dev = devs[i];
2008                 int id;
2009
2010                 if (type == ND_DEVICE_NAMESPACE_BLK) {
2011                         struct nd_namespace_blk *nsblk;
2012
2013                         nsblk = to_nd_namespace_blk(dev);
2014                         id = ida_simple_get(&nd_region->ns_ida, 0, 0,
2015                                         GFP_KERNEL);
2016                         nsblk->id = id;
2017                 } else
2018                         id = i;
2019
2020                 if (id < 0)
2021                         break;
2022                 dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
2023                 dev->groups = nd_namespace_attribute_groups;
2024                 nd_device_register(dev);
2025         }
2026         if (i)
2027                 nd_region->ns_seed = devs[0];
2028
2029         if (devs[i]) {
2030                 int j;
2031
2032                 for (j = i; devs[j]; j++) {
2033                         struct device *dev = devs[j];
2034
2035                         device_initialize(dev);
2036                         put_device(dev);
2037                 }
2038                 *err = j - i;
2039                 /*
2040                  * All of the namespaces we tried to register failed, so
2041                  * fail region activation.
2042                  */
2043                 if (*err == 0)
2044                         rc = -ENODEV;
2045         }
2046         kfree(devs);
2047
2048         if (rc == -ENODEV)
2049                 return rc;
2050
2051         return i;
2052 }