/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 */
#include <linux/libnvdimm.h>
#include <linux/badblocks.h>
#include <linux/export.h>
#include <linux/module.h>
#include <linux/blkdev.h>
#include <linux/device.h>
#include <linux/ctype.h>
#include <linux/ndctl.h>
#include <linux/mutex.h>
#include <linux/slab.h>
#include <linux/io.h>
#include "nd-core.h"
#include "nd.h"

LIST_HEAD(nvdimm_bus_list);
DEFINE_MUTEX(nvdimm_bus_list_mutex);

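/*
 * Walk up from an arbitrary libnvdimm device to its parent nvdimm_bus
 * and take, drop, or test that bus's reconfiguration lock.  Devices
 * that are not (yet) parented to an nvdimm_bus are silently ignored.
 */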
void nvdimm_bus_lock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_lock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_lock);

void nvdimm_bus_unlock(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return;
	mutex_unlock(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(nvdimm_bus_unlock);

bool is_nvdimm_bus_locked(struct device *dev)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	return mutex_is_locked(&nvdimm_bus->reconfig_mutex);
}
EXPORT_SYMBOL(is_nvdimm_bus_locked);

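/*
 * A refcounted mapping of a physical address range that can be shared
 * by multiple regions on the same bus.  Mappings are looked up by
 * @offset on the per-bus mapping_list, and the final kref_put() runs
 * under the bus reconfiguration lock, see nvdimm_map_put().
 */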
struct nvdimm_map {
	struct nvdimm_bus *nvdimm_bus;
	struct list_head list;
	resource_size_t offset;
	unsigned long flags;
	size_t size;
	union {
		void *mem;
		void __iomem *iomem;
	};
	struct kref kref;
};

static struct nvdimm_map *find_nvdimm_map(struct device *dev,
		resource_size_t offset)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	list_for_each_entry(nvdimm_map, &nvdimm_bus->mapping_list, list)
		if (nvdimm_map->offset == offset)
			return nvdimm_map;
	return NULL;
}

static struct nvdimm_map *alloc_nvdimm_map(struct device *dev,
		resource_size_t offset, size_t size, unsigned long flags)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = kzalloc(sizeof(*nvdimm_map), GFP_KERNEL);
	if (!nvdimm_map)
		return NULL;

	INIT_LIST_HEAD(&nvdimm_map->list);
	nvdimm_map->nvdimm_bus = nvdimm_bus;
	nvdimm_map->offset = offset;
	nvdimm_map->flags = flags;
	nvdimm_map->size = size;
	kref_init(&nvdimm_map->kref);

	if (!request_mem_region(offset, size, dev_name(&nvdimm_bus->dev)))
		goto err_request_region;

	if (flags)
		nvdimm_map->mem = memremap(offset, size, flags);
	else
		nvdimm_map->iomem = ioremap(offset, size);

	if (!nvdimm_map->mem)
		goto err_map;

	dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev), "%s: bus unlocked!",
			__func__);
	list_add(&nvdimm_map->list, &nvdimm_bus->mapping_list);

	return nvdimm_map;

 err_map:
	release_mem_region(offset, size);
 err_request_region:
	kfree(nvdimm_map);
	return NULL;
}

static void nvdimm_map_release(struct kref *kref)
{
	struct nvdimm_bus *nvdimm_bus;
	struct nvdimm_map *nvdimm_map;

	nvdimm_map = container_of(kref, struct nvdimm_map, kref);
	nvdimm_bus = nvdimm_map->nvdimm_bus;

	dev_dbg(&nvdimm_bus->dev, "%s: %pa\n", __func__, &nvdimm_map->offset);
	list_del(&nvdimm_map->list);
	if (nvdimm_map->flags)
		memunmap(nvdimm_map->mem);
	else
		iounmap(nvdimm_map->iomem);
	release_mem_region(nvdimm_map->offset, nvdimm_map->size);
	kfree(nvdimm_map);
}

static void nvdimm_map_put(void *data)
{
	struct nvdimm_map *nvdimm_map = data;
	struct nvdimm_bus *nvdimm_bus = nvdimm_map->nvdimm_bus;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	kref_put(&nvdimm_map->kref, nvdimm_map_release);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}

/**
 * devm_nvdimm_memremap - map a resource that is shared across regions
 * @dev: device that will own a reference to the shared mapping
 * @offset: physical base address of the mapping
 * @size: mapping size
 * @flags: memremap flags, or, if zero, perform an ioremap instead
 */
void *devm_nvdimm_memremap(struct device *dev, resource_size_t offset,
		size_t size, unsigned long flags)
{
	struct nvdimm_map *nvdimm_map;

	nvdimm_bus_lock(dev);
	nvdimm_map = find_nvdimm_map(dev, offset);
	if (!nvdimm_map)
		nvdimm_map = alloc_nvdimm_map(dev, offset, size, flags);
	else
		kref_get(&nvdimm_map->kref);
	nvdimm_bus_unlock(dev);

	/* bail out if alloc_nvdimm_map() failed, nothing to unwind */
	if (!nvdimm_map)
		return NULL;

	if (devm_add_action_or_reset(dev, nvdimm_map_put, nvdimm_map))
		return NULL;

	return nvdimm_map->mem;
}
EXPORT_SYMBOL_GPL(devm_nvdimm_memremap);

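/*
 * Example (illustrative only; names are hypothetical): a region driver
 * needing a shared write-back mapping of a table could do:
 *
 *	void *tbl = devm_nvdimm_memremap(dev, tbl_start, tbl_size,
 *			MEMREMAP_WB);
 *	if (!tbl)
 *		return -ENXIO;
 *
 * A zero @flags argument yields an ioremap() (__iomem) mapping instead
 * of a memremap() one.
 */

/*
 * Fletcher64 checksum of @len bytes interpreted as an array of 32-bit
 * words; @le selects little-endian interpretation of the input.  Used
 * by the label code to checksum namespace label data.
 */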
u64 nd_fletcher64(void *addr, size_t len, bool le)
{
	u32 *buf = addr;
	u32 lo32 = 0;
	u64 hi32 = 0;
	int i;

	for (i = 0; i < len / sizeof(u32); i++) {
		lo32 += le ? le32_to_cpu((__le32) buf[i]) : buf[i];
		hi32 += lo32;
	}

	return hi32 << 32 | lo32;
}
EXPORT_SYMBOL_GPL(nd_fletcher64);

struct nvdimm_bus_descriptor *to_nd_desc(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return nvdimm_bus->nd_desc;
}
EXPORT_SYMBOL_GPL(to_nd_desc);

struct device *to_nvdimm_bus_dev(struct nvdimm_bus *nvdimm_bus)
{
	/* struct nvdimm_bus definition is private to libnvdimm */
	return &nvdimm_bus->dev;
}
EXPORT_SYMBOL_GPL(to_nvdimm_bus_dev);

static bool is_uuid_sep(char sep)
{
	if (sep == '\n' || sep == '-' || sep == ':' || sep == '\0')
		return true;
	return false;
}

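/*
 * Parse a textual uuid into a 16-byte binary buffer.  Bytes are read
 * as pairs of hex digits; a single '-', ':', or newline separator is
 * permitted (but not required) after each pair.
 */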
static int nd_uuid_parse(struct device *dev, u8 *uuid_out, const char *buf,
		size_t len)
{
	const char *str = buf;
	u8 uuid[16];
	int i;

	for (i = 0; i < 16; i++) {
		if (!isxdigit(str[0]) || !isxdigit(str[1])) {
			dev_dbg(dev, "%s: pos: %d buf[%zd]: %c buf[%zd]: %c\n",
					__func__, i, str - buf, str[0],
					str + 1 - buf, str[1]);
			return -EINVAL;
		}

		uuid[i] = (hex_to_bin(str[0]) << 4) | hex_to_bin(str[1]);
		str += 2;
		if (is_uuid_sep(*str))
			str++;
	}

	memcpy(uuid_out, uuid, sizeof(uuid));
	return 0;
}

/**
 * nd_uuid_store - common implementation for writing 'uuid' sysfs attributes
 * @dev: container device for the uuid property
 * @uuid_out: uuid buffer to replace
 * @buf: raw sysfs buffer to parse
 * @len: length of the sysfs buffer
 *
 * Enforce that uuids can only be changed while the device is disabled
 * (driver detached).
 * LOCKING: expects device_lock() is held on entry
 */
int nd_uuid_store(struct device *dev, u8 **uuid_out, const char *buf,
		size_t len)
{
	u8 uuid[16];
	int rc;

	if (dev->driver)
		return -EBUSY;

	rc = nd_uuid_parse(dev, uuid, buf, len);
	if (rc)
		return rc;

	kfree(*uuid_out);
	*uuid_out = kmemdup(uuid, sizeof(uuid), GFP_KERNEL);
	if (!(*uuid_out))
		return -ENOMEM;

	return 0;
}

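/*
 * Emit the supported sector sizes as a space-separated list with the
 * currently selected size in brackets, e.g. "512 [4096]".  @supported
 * is a zero-terminated array.
 */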
ssize_t nd_sector_size_show(unsigned long current_lbasize,
		const unsigned long *supported, char *buf)
{
	ssize_t len = 0;
	int i;

	for (i = 0; supported[i]; i++)
		if (current_lbasize == supported[i])
			len += sprintf(buf + len, "[%lu] ", supported[i]);
		else
			len += sprintf(buf + len, "%lu ", supported[i]);
	len += sprintf(buf + len, "\n");
	return len;
}

ssize_t nd_sector_size_store(struct device *dev, const char *buf,
		unsigned long *current_lbasize, const unsigned long *supported)
{
	unsigned long lbasize;
	int rc, i;

	if (dev->driver)
		return -EBUSY;

	rc = kstrtoul(buf, 0, &lbasize);
	if (rc)
		return rc;

	for (i = 0; supported[i]; i++)
		if (lbasize == supported[i])
			break;

	if (supported[i]) {
		*current_lbasize = lbasize;
		return 0;
	} else {
		return -EINVAL;
	}
}

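/*
 * Generic block layer I/O accounting for the nd block drivers (pmem,
 * btt, blk): callers bracket each bio with nd_iostat_start() /
 * nd_iostat_end() to maintain the per-disk iostat counters.
 */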
void __nd_iostat_start(struct bio *bio, unsigned long *start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	*start = jiffies;
	part_round_stats(cpu, &disk->part0);
	part_stat_inc(cpu, &disk->part0, ios[rw]);
	part_stat_add(cpu, &disk->part0, sectors[rw], bio_sectors(bio));
	part_inc_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(__nd_iostat_start);

void nd_iostat_end(struct bio *bio, unsigned long start)
{
	struct gendisk *disk = bio->bi_bdev->bd_disk;
	unsigned long duration = jiffies - start;
	const int rw = bio_data_dir(bio);
	int cpu = part_stat_lock();

	part_stat_add(cpu, &disk->part0, ticks[rw], duration);
	part_round_stats(cpu, &disk->part0);
	part_dec_in_flight(&disk->part0, rw);
	part_stat_unlock();
}
EXPORT_SYMBOL(nd_iostat_end);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	int cmd, len = 0;
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	for_each_set_bit(cmd, &nd_desc->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_bus_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static const char *nvdimm_bus_provider(struct nvdimm_bus *nvdimm_bus)
{
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct device *parent = nvdimm_bus->dev.parent;

	if (nd_desc->provider_name)
		return nd_desc->provider_name;
	else if (parent)
		return dev_name(parent);
	else
		return "unknown";
}

static ssize_t provider_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);

	return sprintf(buf, "%s\n", nvdimm_bus_provider(nvdimm_bus));
}
static DEVICE_ATTR_RO(provider);

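/*
 * A device_lock()/device_unlock() cycle synchronizes against any
 * in-flight probe or remove of the device, so walking the hierarchy
 * with these callbacks serves as a probe-completion barrier for
 * wait_probe_show() below.
 */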
static int flush_namespaces(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	return 0;
}

static int flush_regions_dimms(struct device *dev, void *data)
{
	device_lock(dev);
	device_unlock(dev);
	device_for_each_child(dev, NULL, flush_namespaces);
	return 0;
}

static ssize_t wait_probe_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_bus *nvdimm_bus = to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc;

	if (nd_desc->flush_probe) {
		rc = nd_desc->flush_probe(nd_desc);
		if (rc)
			return rc;
	}
	nd_synchronize();
	device_for_each_child(dev, NULL, flush_regions_dimms);
	return sprintf(buf, "1\n");
}
static DEVICE_ATTR_RO(wait_probe);

static struct attribute *nvdimm_bus_attributes[] = {
	&dev_attr_commands.attr,
	&dev_attr_wait_probe.attr,
	&dev_attr_provider.attr,
	NULL,
};

struct attribute_group nvdimm_bus_attribute_group = {
	.attrs = nvdimm_bus_attributes,
};
EXPORT_SYMBOL_GPL(nvdimm_bus_attribute_group);

static void set_badblock(struct badblocks *bb, sector_t s, int num)
{
	dev_dbg(bb->dev, "Found a poison range (0x%llx, 0x%llx)\n",
			(u64) s * 512, (u64) num * 512);
	/* this isn't an error as the hardware will still throw an exception */
	if (badblocks_set(bb, s, num, 1))
		dev_info_once(bb->dev, "%s: failed for sector %llx\n",
				__func__, (u64) s);
}

/**
 * __add_badblock_range() - Convert a physical address range to bad sectors
 * @bb:		badblocks instance to populate
 * @ns_offset:	namespace offset where the error range begins (in bytes)
 * @len:	number of bytes of poison to be added
 *
 * This assumes that the range provided with (ns_offset, len) is within
 * the bounds of physical addresses for this namespace, i.e. lies in the
 * interval [ns_start, ns_start + ns_size)
 */
static void __add_badblock_range(struct badblocks *bb, u64 ns_offset, u64 len)
{
	const unsigned int sector_size = 512;
	sector_t start_sector;
	u64 num_sectors;
	u32 rem;

	start_sector = div_u64(ns_offset, sector_size);
	num_sectors = div_u64_rem(len, sector_size, &rem);
	if (rem)
		num_sectors++;

	if (unlikely(num_sectors > (u64)INT_MAX)) {
		u64 remaining = num_sectors;
		sector_t s = start_sector;

		while (remaining) {
			int done = min_t(u64, remaining, INT_MAX);

			set_badblock(bb, s, done);
			remaining -= done;
			s += done;
		}
	} else
		set_badblock(bb, start_sector, num_sectors);
}

static void badblocks_populate(struct list_head *poison_list,
		struct badblocks *bb, const struct resource *res)
{
	struct nd_poison *pl;

	if (list_empty(poison_list))
		return;

	list_for_each_entry(pl, poison_list, list) {
		u64 pl_end = pl->start + pl->length - 1;

		/* Discard intervals with no intersection */
		if (pl_end < res->start)
			continue;
		if (pl->start > res->end)
			continue;
		/* Deal with any overlap after start of the namespace */
		if (pl->start >= res->start) {
			u64 start = pl->start;
			u64 len;

			if (pl_end <= res->end)
				len = pl->length;
			else
				len = res->start + resource_size(res)
					- pl->start;
			__add_badblock_range(bb, start - res->start, len);
			continue;
		}
		/* Deal with overlap for poison starting before the namespace */
		if (pl->start < res->start) {
			u64 len;

			if (pl_end < res->end)
				len = pl->start + pl->length - res->start;
			else
				len = resource_size(res);
			__add_badblock_range(bb, 0, len);
		}
	}
}

/**
 * nvdimm_badblocks_populate() - Convert a list of poison ranges to badblocks
 * @nd_region: parent region of the range to interrogate
 * @bb: badblocks instance to populate
 * @res: resource range to consider
 *
 * The poison list generated during bus initialization may contain
 * multiple, possibly overlapping physical address ranges.  Compare each
 * of these ranges to the resource range currently being initialized,
 * and add badblocks entries for all matching sub-ranges.
 */
void nvdimm_badblocks_populate(struct nd_region *nd_region,
		struct badblocks *bb, const struct resource *res)
{
	struct nvdimm_bus *nvdimm_bus;
	struct list_head *poison_list;

	if (!is_nd_pmem(&nd_region->dev)) {
		dev_WARN_ONCE(&nd_region->dev, 1,
				"%s only valid for pmem regions\n", __func__);
		return;
	}
	nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	poison_list = &nvdimm_bus->poison_list;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	badblocks_populate(poison_list, bb, res);
	nvdimm_bus_unlock(&nvdimm_bus->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_badblocks_populate);

static int add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	pl = kzalloc(sizeof(*pl), GFP_KERNEL);
	if (!pl)
		return -ENOMEM;

	pl->start = addr;
	pl->length = length;
	list_add_tail(&pl->list, &nvdimm_bus->poison_list);

	return 0;
}

static int bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	struct nd_poison *pl;

	if (list_empty(&nvdimm_bus->poison_list))
		return add_poison(nvdimm_bus, addr, length);

	/*
	 * There is a chance this is a duplicate, check for those first.
	 * This will be the common case as ARS_STATUS returns all known
	 * errors in the SPA space, and we can't query it per region
	 */
	list_for_each_entry(pl, &nvdimm_bus->poison_list, list)
		if (pl->start == addr) {
			/* If length has changed, update this list entry */
			if (pl->length != length)
				pl->length = length;
			return 0;
		}

	/*
	 * If not a duplicate or a simple length update, add the entry as is,
	 * as any overlapping ranges will get resolved when the list is consumed
	 * and converted to badblocks
	 */
	return add_poison(nvdimm_bus, addr, length);
}

int nvdimm_bus_add_poison(struct nvdimm_bus *nvdimm_bus, u64 addr, u64 length)
{
	int rc;

	nvdimm_bus_lock(&nvdimm_bus->dev);
	rc = bus_add_poison(nvdimm_bus, addr, length);
	nvdimm_bus_unlock(&nvdimm_bus->dev);

	return rc;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_add_poison);

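/*
 * Example (illustrative only): a bus provider processing Address Range
 * Scrub results would report each discovered poison range with:
 *
 *	rc = nvdimm_bus_add_poison(nvdimm_bus, spa, len);
 *	if (rc)
 *		return rc;
 *
 * The accumulated list is later converted to badblocks per namespace
 * by nvdimm_badblocks_populate().
 */
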
#ifdef CONFIG_BLK_DEV_INTEGRITY
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	struct blk_integrity bi;

	if (meta_size == 0)
		return 0;

	memset(&bi, 0, sizeof(bi));

	bi.tuple_size = meta_size;
	bi.tag_size = meta_size;

	blk_integrity_register(disk, &bi);
	blk_queue_max_integrity_segments(disk->queue, 1);

	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#else /* CONFIG_BLK_DEV_INTEGRITY */
int nd_integrity_init(struct gendisk *disk, unsigned long meta_size)
{
	return 0;
}
EXPORT_SYMBOL(nd_integrity_init);

#endif

static __init int libnvdimm_init(void)
{
	int rc;

	rc = nvdimm_bus_init();
	if (rc)
		return rc;
	rc = nvdimm_init();
	if (rc)
		goto err_dimm;
	rc = nd_region_init();
	if (rc)
		goto err_region;
	return 0;
 err_region:
	nvdimm_exit();
 err_dimm:
	nvdimm_bus_exit();
	return rc;
}

static __exit void libnvdimm_exit(void)
{
	WARN_ON(!list_empty(&nvdimm_bus_list));
	nd_region_exit();
	nvdimm_exit();
	nvdimm_bus_exit();
	nd_region_devs_exit();
	nvdimm_devs_exit();
}

MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Intel Corporation");
subsys_initcall(libnvdimm_init);
module_exit(libnvdimm_exit);