// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/module.h>
#include <linux/device.h>
#include <linux/sort.h>
#include <linux/slab.h>
#include <linux/list.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "pmem.h"
#include "pfn.h"
#include "nd.h"

static void namespace_io_release(struct device *dev)
{
	struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

	kfree(nsio);
}

static void namespace_pmem_release(struct device *dev)
{
	struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nspm->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nspm->id);
	kfree(nspm->alt_name);
	kfree(nspm->uuid);
	kfree(nspm);
}

static void namespace_blk_release(struct device *dev)
{
	struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
	struct nd_region *nd_region = to_nd_region(dev->parent);

	if (nsblk->id >= 0)
		ida_simple_remove(&nd_region->ns_ida, nsblk->id);
	kfree(nsblk->alt_name);
	kfree(nsblk->uuid);
	kfree(nsblk->res);
	kfree(nsblk);
}

static bool is_namespace_pmem(const struct device *dev);
static bool is_namespace_blk(const struct device *dev);
static bool is_namespace_io(const struct device *dev);

static int is_uuid_busy(struct device *dev, void *data)
{
	u8 *uuid1 = data, *uuid2 = NULL;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid2 = nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid2 = nsblk->uuid;
	} else if (is_nd_btt(dev)) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		uuid2 = nd_btt->uuid;
	} else if (is_nd_pfn(dev)) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		uuid2 = nd_pfn->uuid;
	}

	if (uuid2 && memcmp(uuid1, uuid2, NSLABEL_UUID_LEN) == 0)
		return -EBUSY;

	return 0;
}

static int is_namespace_uuid_busy(struct device *dev, void *data)
{
	if (is_nd_region(dev))
		return device_for_each_child(dev, data, is_uuid_busy);
	return 0;
}

/**
 * nd_is_uuid_unique - verify that no other namespace has @uuid
 * @dev: any device on an nvdimm_bus
 * @uuid: uuid to check
 */
bool nd_is_uuid_unique(struct device *dev, u8 *uuid)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!nvdimm_bus)
		return false;
	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm_bus->dev));
	if (device_for_each_child(&nvdimm_bus->dev, uuid,
				is_namespace_uuid_busy) != 0)
		return false;
	return true;
}

bool pmem_should_map_pages(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_namespace_io *nsio;

	if (!IS_ENABLED(CONFIG_ZONE_DEVICE))
		return false;

	if (!test_bit(ND_REGION_PAGEMAP, &nd_region->flags))
		return false;

	if (is_nd_pfn(dev) || is_nd_btt(dev))
		return false;

	if (ndns->force_raw)
		return false;

	nsio = to_nd_namespace_io(dev);
	if (region_intersects(nsio->res.start, resource_size(&nsio->res),
				IORESOURCE_SYSTEM_RAM,
				IORES_DESC_NONE) == REGION_MIXED)
		return false;

	return ARCH_MEMREMAP_PMEM == MEMREMAP_WB;
}
EXPORT_SYMBOL(pmem_should_map_pages);

unsigned int pmem_sector_size(struct nd_namespace_common *ndns)
{
	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (nspm->lbasize == 0 || nspm->lbasize == 512)
			/* default */;
		else if (nspm->lbasize == 4096)
			return 4096;
		else
			dev_WARN(&ndns->dev, "unsupported sector size: %lu\n",
					nspm->lbasize);
	}

	/*
	 * There is no namespace label (is_namespace_io()), or the label
	 * indicates the default sector size.
	 */
	return 512;
}
EXPORT_SYMBOL(pmem_sector_size);

const char *nvdimm_namespace_disk_name(struct nd_namespace_common *ndns,
		char *name)
{
	struct nd_region *nd_region = to_nd_region(ndns->dev.parent);
	const char *suffix = NULL;

	if (ndns->claim && is_nd_btt(ndns->claim))
		suffix = "s";

	if (is_namespace_pmem(&ndns->dev) || is_namespace_io(&ndns->dev)) {
		int nsidx = 0;

		if (is_namespace_pmem(&ndns->dev)) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(&ndns->dev);
			nsidx = nspm->id;
		}

		if (nsidx)
			sprintf(name, "pmem%d.%d%s", nd_region->id, nsidx,
					suffix ? suffix : "");
		else
			sprintf(name, "pmem%d%s", nd_region->id,
					suffix ? suffix : "");
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		sprintf(name, "ndblk%d.%d%s", nd_region->id, nsblk->id,
				suffix ? suffix : "");
	} else {
		return NULL;
	}

	return name;
}
EXPORT_SYMBOL(nvdimm_namespace_disk_name);

const u8 *nd_dev_to_uuid(struct device *dev)
{
	static const u8 null_uuid[16];

	if (!dev)
		return null_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return null_uuid;
}
EXPORT_SYMBOL(nd_dev_to_uuid);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

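/*
 * Internal helper for alt_name_store(); requires the namespace to be
 * idle (not driver-attached or claimed) and copies a trimmed,
 * NSLABEL_NAME_LEN-bounded string into the namespace's alt_name.
 */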
static ssize_t __alt_name_store(struct device *dev, const char *buf,
		const size_t len)
{
	char *input, *pos, *alt_name, **ns_altname;
	ssize_t rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = &nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = &nsblk->alt_name;
	} else
		return -ENXIO;

	if (dev->driver || to_ndns(dev)->claim)
		return -EBUSY;

	input = kstrndup(buf, len, GFP_KERNEL);
	if (!input)
		return -ENOMEM;

	pos = strim(input);
	if (strlen(pos) + 1 > NSLABEL_NAME_LEN) {
		rc = -EINVAL;
		goto out;
	}

	alt_name = kzalloc(NSLABEL_NAME_LEN, GFP_KERNEL);
	if (!alt_name) {
		rc = -ENOMEM;
		goto out;
	}
	kfree(*ns_altname);
	*ns_altname = alt_name;
	sprintf(*ns_altname, "%s", pos);
	rc = len;

 out:
	kfree(input);
	return rc;
}

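/*
 * Sum the sizes of all dpa resources tagged with this namespace's
 * label_id on the single BLK mapping; a BLK namespace may be backed
 * by multiple discontiguous dpa extents.
 */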
static resource_size_t nd_namespace_blk_size(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	resource_size_t size = 0;
	struct resource *res;

	if (!nsblk->uuid)
		return 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			size += resource_size(res);
	return size;
}

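/*
 * Check that the in-core dpa resources for this BLK namespace are
 * consistent with the labels on media: no unacknowledged resource
 * adjustments, a matching extent count, and no stale entries in
 * nsblk->res[].
 */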
static bool __nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	struct nd_region *nd_region = to_nd_region(nsblk->common.dev.parent);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_id label_id;
	struct resource *res;
	int count, i;

	if (!nsblk->uuid || !nsblk->lbasize || !ndd)
		return false;

	count = 0;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		/*
		 * Resources with unacknowledged adjustments indicate a
		 * failure to update labels
		 */
		if (res->flags & DPA_RESOURCE_ADJUSTED)
			return false;
		count++;
	}

	/* These values match after a successful label update */
	if (count != nsblk->num_resources)
		return false;

	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *found = NULL;

		for_each_dpa_resource(ndd, res)
			if (res == nsblk->res[i]) {
				found = res;
				break;
			}
		/* stale resource */
		if (!found)
			return false;
	}

	return true;
}

resource_size_t nd_namespace_blk_validate(struct nd_namespace_blk *nsblk)
{
	resource_size_t size;

	nvdimm_bus_lock(&nsblk->common.dev);
	size = __nd_namespace_blk_validate(nsblk);
	nvdimm_bus_unlock(&nsblk->common.dev);

	return size;
}
EXPORT_SYMBOL(nd_namespace_blk_validate);

static int nd_namespace_label_update(struct nd_region *nd_region,
		struct device *dev)
{
	dev_WARN_ONCE(dev, dev->driver || to_ndns(dev)->claim,
			"namespace must be idle during label update\n");
	if (dev->driver || to_ndns(dev)->claim)
		return 0;

	/*
	 * Only allow label writes that will result in a valid namespace
	 * or deletion of an existing namespace.
	 */
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);
		resource_size_t size = resource_size(&nspm->nsio.res);

		if (size == 0 && nspm->uuid)
			/* delete allocation */;
		else if (!nspm->uuid)
			return 0;

		return nd_pmem_namespace_label_update(nd_region, nspm, size);
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);
		resource_size_t size = nd_namespace_blk_size(nsblk);

		if (size == 0 && nsblk->uuid)
			/* delete allocation */;
		else if (!nsblk->uuid || !nsblk->lbasize)
			return 0;

		return nd_blk_namespace_label_update(nd_region, nsblk, size);
	} else
		return -ENXIO;
}

static ssize_t alt_name_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	ssize_t rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __alt_name_store(dev, buf, len);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%zd)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t alt_name_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	char *ns_altname;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_altname = nspm->alt_name;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_altname = nsblk->alt_name;
	} else
		return -ENXIO;

	return sprintf(buf, "%s\n", ns_altname ? ns_altname : "");
}
static DEVICE_ATTR_RW(alt_name);

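/*
 * Release @n bytes from the tail of the @label_id allocation on one
 * dimm: whole trailing resources are deleted and the final, partially
 * covered resource is shrunk in place. BLK allocations give back
 * space from the low end so that BLK stays relegated to high DPA.
 */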
static int scan_free(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int rc = 0;

	while (n) {
		struct resource *res, *last;
		resource_size_t new_start;

		last = NULL;
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id->id) == 0)
				last = res;
		res = last;
		if (!res)
			return 0;

		if (n >= resource_size(res)) {
			n -= resource_size(res);
			nd_dbg_dpa(nd_region, ndd, res, "delete %d\n", rc);
			nvdimm_free_dpa(ndd, res);
			/* retry with last resource deleted */
			continue;
		}

		/*
		 * Keep BLK allocations relegated to high DPA as much as
		 * possible
		 */
		if (is_blk)
			new_start = res->start + n;
		else
			new_start = res->start;

		rc = adjust_resource(res, new_start, resource_size(res) - n);
		if (rc == 0)
			res->flags |= DPA_RESOURCE_ADJUSTED;
		nd_dbg_dpa(nd_region, ndd, res, "shrink %d\n", rc);
		break;
	}

	return rc;
}

/**
 * shrink_dpa_allocation - for each dimm in region free n bytes for label_id
 * @nd_region: the set of dimms to reclaim @n bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to release
 *
 * Assumes resources are ordered. Starting from the end try to
 * adjust_resource() the allocation to @n, but if @n is larger than the
 * allocation delete it and find the 'new' last allocation in the label
 * set.
 */
static int shrink_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		int rc;

		rc = scan_free(nd_region, nd_mapping, label_id, n);
		if (rc)
			return rc;
	}

	return 0;
}

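/*
 * Establish the first @label_id resource on a mapping: BLK
 * allocations are placed at the top of the dimm's DPA range, PMEM
 * allocations at the bottom. Returns the number of bytes that could
 * not be allocated (0 on success, @n on failure).
 */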
static resource_size_t init_dpa_allocation(struct nd_label_id *label_id,
		struct nd_region *nd_region, struct nd_mapping *nd_mapping,
		resource_size_t n)
{
	bool is_blk = strncmp(label_id->id, "blk", 3) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	resource_size_t first_dpa;
	struct resource *res;
	int rc = 0;

	/* allocate blk from highest dpa first */
	if (is_blk)
		first_dpa = nd_mapping->start + nd_mapping->size - n;
	else
		first_dpa = nd_mapping->start;

	/* first resource allocation for this label-id or dimm */
	res = nvdimm_allocate_dpa(ndd, label_id, first_dpa, n);
	if (!res)
		rc = -EBUSY;

	nd_dbg_dpa(nd_region, ndd, res, "init %d\n", rc);
	return rc ? n : 0;
}

/**
 * space_valid() - validate free dpa space against constraints
 * @nd_region: hosting region of the free space
 * @ndd: dimm device data for debug
 * @label_id: namespace id to allocate space
 * @prev: potential allocation that precedes free space
 * @next: allocation that follows the given free space range
 * @exist: first allocation with same id in the mapping
 * @n: range that must be satisfied for pmem allocations
 * @valid: free space range to validate
 *
 * BLK-space is valid as long as it does not precede a PMEM
 * allocation in a given region. PMEM-space must be contiguous
 * and adjacent to an existing allocation (if one exists). If
 * reserving PMEM, any space is valid.
 */
static void space_valid(struct nd_region *nd_region, struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, struct resource *prev,
		struct resource *next, struct resource *exist,
		resource_size_t n, struct resource *valid)
{
	bool is_reserve = strcmp(label_id->id, "pmem-reserve") == 0;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	unsigned long align;

	align = nd_region->align / nd_region->ndr_mappings;
	valid->start = ALIGN(valid->start, align);
	valid->end = ALIGN_DOWN(valid->end + 1, align) - 1;

	if (valid->start >= valid->end)
		goto invalid;

	if (is_reserve)
		return;

	if (!is_pmem) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_bus *nvdimm_bus;
		struct blk_alloc_info info = {
			.nd_mapping = nd_mapping,
			.available = nd_mapping->size,
			.res = valid,
		};

		WARN_ON(!is_nd_blk(&nd_region->dev));
		nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
		device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);
		return;
	}

	/* allocation needs to be contiguous, so this is all or nothing */
	if (resource_size(valid) < n)
		goto invalid;

	/* we've got all the space we need and no existing allocation */
	if (!exist)
		return;

	/* allocation needs to be contiguous with the existing namespace */
	if (valid->start == exist->end + 1
			|| valid->end == exist->start - 1)
		return;

 invalid:
	/* truncate @valid size to 0 */
	valid->end = valid->start - 1;
}

enum alloc_loc {
	ALLOC_ERR = 0, ALLOC_BEFORE, ALLOC_MID, ALLOC_AFTER,
};

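/*
 * Attempt to allocate @n more bytes for @label_id on one mapping by
 * scanning the gaps before, between, and after existing dpa
 * resources. Prefer growing an adjacent same-label resource over
 * inserting a new one, and rescan after each successful allocation
 * since a new resource may expose further candidate gaps. Returns
 * the shortfall in bytes (0 when fully allocated).
 */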
static resource_size_t scan_allocate(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id,
		resource_size_t n)
{
	resource_size_t mapping_end = nd_mapping->start + nd_mapping->size - 1;
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *exist = NULL, valid;
	const resource_size_t to_allocate = n;
	int first;

	for_each_dpa_resource(ndd, res)
		if (strcmp(label_id->id, res->name) == 0)
			exist = res;

	valid.start = nd_mapping->start;
	valid.end = mapping_end;
	valid.name = "free space";
retry:
	first = 0;
	for_each_dpa_resource(ndd, res) {
		struct resource *next = res->sibling, *new_res = NULL;
		resource_size_t allocate, available = 0;
		enum alloc_loc loc = ALLOC_ERR;
		const char *action;
		int rc = 0;

		/* ignore resources outside this nd_mapping */
		if (res->start > mapping_end)
			continue;
		if (res->end < nd_mapping->start)
			continue;

		/* space at the beginning of the mapping */
		if (!first++ && res->start > nd_mapping->start) {
			valid.start = nd_mapping->start;
			valid.end = res->start - 1;
			space_valid(nd_region, ndd, label_id, NULL, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_BEFORE;
		}

		/* space between allocations */
		if (!loc && next) {
			valid.start = res->start + resource_size(res);
			valid.end = min(mapping_end, next->start - 1);
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_MID;
		}

		/* space at the end of the mapping */
		if (!loc && !next) {
			valid.start = res->start + resource_size(res);
			valid.end = mapping_end;
			space_valid(nd_region, ndd, label_id, res, next, exist,
					to_allocate, &valid);
			available = resource_size(&valid);
			if (available)
				loc = ALLOC_AFTER;
		}

		if (!loc || !available)
			continue;
		allocate = min(available, n);
		switch (loc) {
		case ALLOC_BEFORE:
			if (strcmp(res->name, label_id->id) == 0) {
				/* adjust current resource up */
				rc = adjust_resource(res, res->start - allocate,
						resource_size(res) + allocate);
				action = "cur grow up";
			} else
				action = "allocate";
			break;
		case ALLOC_MID:
			if (strcmp(next->name, label_id->id) == 0) {
				/* adjust next resource up */
				rc = adjust_resource(next, next->start
						- allocate, resource_size(next)
						+ allocate);
				new_res = next;
				action = "next grow up";
			} else if (strcmp(res->name, label_id->id) == 0) {
				action = "grow down";
			} else
				action = "allocate";
			break;
		case ALLOC_AFTER:
			if (strcmp(res->name, label_id->id) == 0)
				action = "grow down";
			else
				action = "allocate";
			break;
		default:
			return n;
		}

		if (strcmp(action, "allocate") == 0) {
			/* BLK allocate bottom up */
			if (!is_pmem)
				valid.start += available - allocate;

			new_res = nvdimm_allocate_dpa(ndd, label_id,
					valid.start, allocate);
			if (!new_res)
				rc = -EBUSY;
		} else if (strcmp(action, "grow down") == 0) {
			/* adjust current resource down */
			rc = adjust_resource(res, res->start, resource_size(res)
					+ allocate);
			if (rc == 0)
				res->flags |= DPA_RESOURCE_ADJUSTED;
		}

		if (!new_res)
			new_res = res;

		nd_dbg_dpa(nd_region, ndd, new_res, "%s(%d) %d\n",
				action, loc, rc);

		if (rc)
			return n;

		n -= allocate;
		if (n) {
			/*
			 * Retry scan with newly inserted resources.
			 * For example, if we did an ALLOC_BEFORE
			 * insertion there may also have been space
			 * available for an ALLOC_AFTER insertion, so we
			 * need to check this same resource again
			 */
			goto retry;
		} else
			return 0;
	}

	/*
	 * If we allocated nothing in the BLK case it may be because we are in
	 * an initial "pmem-reserve" pass. Only do an initial BLK allocation
	 * when none of the DPA space is reserved.
	 */
	if ((is_pmem || !ndd->dpa.child) && n == to_allocate)
		return init_dpa_allocation(label_id, nd_region, nd_mapping, n);
	return n;
}

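/*
 * Coalesce physically adjacent dpa resources that share @label_id
 * into a single resource. Only needed for BLK allocations; PMEM
 * extents are kept contiguous by construction (see space_valid()).
 */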
static int merge_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_label_id *label_id)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;

	if (strncmp("pmem", label_id->id, 4) == 0)
		return 0;
retry:
	for_each_dpa_resource(ndd, res) {
		int rc;
		struct resource *next = res->sibling;
		resource_size_t end = res->start + resource_size(res);

		if (!next || strcmp(res->name, label_id->id) != 0
				|| strcmp(next->name, label_id->id) != 0
				|| end != next->start)
			continue;
		end += resource_size(next);
		nvdimm_free_dpa(ndd, next);
		rc = adjust_resource(res, res->start, end - res->start);
		nd_dbg_dpa(nd_region, ndd, res, "merge %d\n", rc);
		if (rc)
			return rc;
		res->flags |= DPA_RESOURCE_ADJUSTED;
		goto retry;
	}

	return 0;
}

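/*
 * device_for_each_child() callback: for a pmem region with a mapping
 * of the target nvdimm (@data), claim all remaining PMEM capacity
 * with temporary "pmem-reserve" placeholder resources so that a
 * following BLK allocation pass cannot encroach on it.
 */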
int __reserve_free_pmem(struct device *dev, void *data)
{
	struct nvdimm *nvdimm = data;
	struct nd_region *nd_region;
	struct nd_label_id label_id;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	if (nd_region->ndr_mappings == 0)
		return 0;

	memset(&label_id, 0, sizeof(label_id));
	strcat(label_id.id, "pmem-reserve");
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t n, rem = 0;

		if (nd_mapping->nvdimm != nvdimm)
			continue;

		n = nd_pmem_available_dpa(nd_region, nd_mapping, &rem);
		if (n == 0)
			return 0;
		rem = scan_allocate(nd_region, nd_mapping, &label_id, n);
		dev_WARN_ONCE(&nd_region->dev, rem,
				"pmem reserve underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		return rem ? -ENXIO : 0;
	}

	return 0;
}

void release_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res, *_res;

	for_each_dpa_resource_safe(ndd, res, _res)
		if (strcmp(res->name, "pmem-reserve") == 0)
			nvdimm_free_dpa(ndd, res);
}

static int reserve_free_pmem(struct nvdimm_bus *nvdimm_bus,
		struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;
	int rc;

	rc = device_for_each_child(&nvdimm_bus->dev, nvdimm,
			__reserve_free_pmem);
	if (rc)
		release_free_pmem(nvdimm_bus, nd_mapping);
	return rc;
}

/**
 * grow_dpa_allocation - for each dimm allocate n bytes for @label_id
 * @nd_region: the set of dimms to allocate @n more bytes from
 * @label_id: unique identifier for the namespace consuming this dpa range
 * @n: number of bytes per-dimm to add to the existing allocation
 *
 * Assumes resources are ordered. For BLK regions, first consume
 * BLK-only available DPA free space, then consume PMEM-aliased DPA
 * space starting at the highest DPA. For PMEM regions start
 * allocations from the start of an interleave set and end at the first
 * BLK allocation or the end of the interleave set, whichever comes
 * first.
 */
static int grow_dpa_allocation(struct nd_region *nd_region,
		struct nd_label_id *label_id, resource_size_t n)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	bool is_pmem = strncmp(label_id->id, "pmem", 4) == 0;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		resource_size_t rem = n;
		int rc, j;

		/*
		 * In the BLK case try once with all unallocated PMEM
		 * reserved, and once without
		 */
		for (j = is_pmem; j < 2; j++) {
			bool blk_only = j == 0;

			if (blk_only) {
				rc = reserve_free_pmem(nvdimm_bus, nd_mapping);
				if (rc)
					return rc;
			}
			rem = scan_allocate(nd_region, nd_mapping,
					label_id, rem);
			if (blk_only)
				release_free_pmem(nvdimm_bus, nd_mapping);

			/* try again and allow encroachments into PMEM */
			if (rem == 0)
				break;
		}

		dev_WARN_ONCE(&nd_region->dev, rem,
				"allocation underrun: %#llx of %#llx bytes\n",
				(unsigned long long) n - rem,
				(unsigned long long) n);
		if (rem)
			return -ENXIO;

		rc = merge_dpa(nd_region, nd_mapping, label_id);
		if (rc)
			return rc;
	}

	return 0;
}

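/*
 * Program the namespace's system-physical-address range: the spa
 * offset into the region is the dpa offset of the label_id
 * allocation scaled by the number of dimms in the interleave set.
 */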
static void nd_namespace_pmem_set_resource(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	struct resource *res = &nspm->nsio.res;
	resource_size_t offset = 0;

	if (size && !nspm->uuid) {
		WARN_ON_ONCE(1);
		size = 0;
	}

	if (size && nspm->uuid) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_id label_id;
		struct resource *res;

		if (!ndd) {
			size = 0;
			goto out;
		}

		nd_label_gen_id(&label_id, nspm->uuid, 0);

		/* calculate a spa offset from the dpa allocation offset */
		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0) {
				offset = (res->start - nd_mapping->start)
					* nd_region->ndr_mappings;
				goto out;
			}

		WARN_ON_ONCE(1);
		size = 0;
	}

 out:
	res->start = nd_region->ndr_start + offset;
	res->end = res->start + size - 1;
}

static bool uuid_not_set(const u8 *uuid, struct device *dev, const char *where)
{
	if (!uuid) {
		dev_dbg(dev, "%s: uuid not set\n", where);
		return true;
	}
	return false;
}

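/*
 * Resize a namespace: validate alignment and available capacity,
 * divide the requested size evenly across the dimms in the region,
 * then shrink or grow the per-dimm allocations to match. Caller
 * holds the device and nvdimm_bus locks.
 */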
static ssize_t __size_store(struct device *dev, unsigned long long val)
{
	resource_size_t allocated = 0, available = 0;
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm_drvdata *ndd;
	struct nd_label_id label_id;
	u32 flags = 0, remainder;
	int rc, i, id = -1;
	u8 *uuid = NULL;

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		id = nspm->id;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
		id = nsblk->id;
	}

	/*
	 * We need a uuid for the allocation-label and dimm(s) on which
	 * to store the label.
	 */
	if (uuid_not_set(uuid, dev, __func__))
		return -ENXIO;
	if (nd_region->ndr_mappings == 0) {
		dev_dbg(dev, "not associated with dimm(s)\n");
		return -ENXIO;
	}

	div_u64_rem(val, nd_region->align, &remainder);
	if (remainder) {
		dev_dbg(dev, "%llu is not %luK aligned\n", val,
				nd_region->align / SZ_1K);
		return -EINVAL;
	}

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		ndd = to_ndd(nd_mapping);

		/*
		 * All dimms in an interleave set, or the base dimm for a blk
		 * region, need to be enabled for the size to be changed.
		 */
		if (!ndd)
			return -ENXIO;

		allocated += nvdimm_allocated_dpa(ndd, &label_id);
	}
	available = nd_region_allocatable_dpa(nd_region);

	if (val > available + allocated)
		return -ENOSPC;

	if (val == allocated)
		return 0;

	val = div_u64(val, nd_region->ndr_mappings);
	allocated = div_u64(allocated, nd_region->ndr_mappings);
	if (val < allocated)
		rc = shrink_dpa_allocation(nd_region, &label_id,
				allocated - val);
	else
		rc = grow_dpa_allocation(nd_region, &label_id, val - allocated);

	if (rc)
		return rc;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		nd_namespace_pmem_set_resource(nd_region, nspm,
				val * nd_region->ndr_mappings);
	}

	/*
	 * Try to delete the namespace if we deleted all of its
	 * allocation, this is not the seed or 0th device for the
	 * region, and it is not actively claimed by a btt, pfn, or dax
	 * instance.
	 */
	if (val == 0 && id != 0 && nd_region->ns_seed != dev && !ndns->claim)
		nd_device_unregister(dev, ND_ASYNC);

	return rc;
}

static ssize_t size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	unsigned long long val;
	u8 **uuid = NULL;
	int rc;

	rc = kstrtoull(buf, 0, &val);
	if (rc)
		return rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __size_store(dev, val);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = &nsblk->uuid;
	}

	if (rc == 0 && val == 0 && uuid) {
		/* setting size zero == 'delete namespace' */
		kfree(*uuid);
		*uuid = NULL;
	}

	dev_dbg(dev, "%llx %s (%d)\n", val, rc < 0 ? "fail" : "success", rc);

	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

resource_size_t __nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	struct device *dev = &ndns->dev;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return resource_size(&nspm->nsio.res);
	} else if (is_namespace_blk(dev)) {
		return nd_namespace_blk_size(to_nd_namespace_blk(dev));
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		return resource_size(&nsio->res);
	} else
		WARN_ONCE(1, "unknown namespace type\n");
	return 0;
}

resource_size_t nvdimm_namespace_capacity(struct nd_namespace_common *ndns)
{
	resource_size_t size;

	nvdimm_bus_lock(&ndns->dev);
	size = __nvdimm_namespace_capacity(ndns);
	nvdimm_bus_unlock(&ndns->dev);

	return size;
}
EXPORT_SYMBOL(nvdimm_namespace_capacity);

bool nvdimm_namespace_locked(struct nd_namespace_common *ndns)
{
	int i;
	bool locked = false;
	struct device *dev = &ndns->dev;
	struct nd_region *nd_region = to_nd_region(dev->parent);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		if (test_bit(NDD_LOCKED, &nvdimm->flags)) {
			dev_dbg(dev, "%s locked\n", nvdimm_name(nvdimm));
			locked = true;
		}
	}
	return locked;
}
EXPORT_SYMBOL(nvdimm_namespace_locked);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%llu\n", (unsigned long long)
			nvdimm_namespace_capacity(to_ndns(dev)));
}
static DEVICE_ATTR(size, 0444, size_show, size_store);

static u8 *namespace_to_uuid(struct device *dev)
{
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nsblk->uuid;
	} else
		return ERR_PTR(-ENXIO);
}

static ssize_t uuid_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	u8 *uuid = namespace_to_uuid(dev);

	if (IS_ERR(uuid))
		return PTR_ERR(uuid);
	if (uuid)
		return sprintf(buf, "%pUb\n", uuid);
	return sprintf(buf, "\n");
}

/**
 * namespace_update_uuid - check for a unique uuid and whether we're "renaming"
 * @nd_region: parent region so we can update all dimms in the set
 * @dev: namespace type for generating label_id
 * @new_uuid: incoming uuid
 * @old_uuid: reference to the uuid storage location in the namespace object
 */
static int namespace_update_uuid(struct nd_region *nd_region,
		struct device *dev, u8 *new_uuid, u8 **old_uuid)
{
	u32 flags = is_namespace_blk(dev) ? NSLABEL_FLAG_LOCAL : 0;
	struct nd_label_id old_label_id;
	struct nd_label_id new_label_id;
	int i;

	if (!nd_is_uuid_unique(dev, new_uuid))
		return -EINVAL;

	if (*old_uuid == NULL)
		goto out;

	/*
	 * If we've already written a label with this uuid, then it's
	 * too late to rename because we can't reliably update the uuid
	 * without losing the old namespace. Userspace must delete this
	 * namespace to abandon the old uuid.
	 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		/*
		 * This check by itself is sufficient because old_uuid
		 * would be NULL above if this uuid did not exist in the
		 * currently written set.
		 *
		 * FIXME: can we delete uuid with zero dpa allocated?
		 */
		if (list_empty(&nd_mapping->labels))
			return -EBUSY;
	}

	nd_label_gen_id(&old_label_id, *old_uuid, flags);
	nd_label_gen_id(&new_label_id, new_uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, old_label_id.id) == 0)
				sprintf((void *) res->name, "%s",
						new_label_id.id);

		mutex_lock(&nd_mapping->lock);
		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			struct nd_label_id label_id;

			if (!nd_label)
				continue;
			nd_label_gen_id(&label_id, nd_label->uuid,
					__le32_to_cpu(nd_label->flags));
			if (strcmp(old_label_id.id, label_id.id) == 0)
				set_bit(ND_LABEL_REAP, &label_ent->flags);
		}
		mutex_unlock(&nd_mapping->lock);
	}
	kfree(*old_uuid);
 out:
	*old_uuid = new_uuid;
	return 0;
}

static ssize_t uuid_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	u8 *uuid = NULL;
	ssize_t rc = 0;
	u8 **ns_uuid;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		ns_uuid = &nspm->uuid;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		ns_uuid = &nsblk->uuid;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_uuid_store(dev, &uuid, buf, len);
	if (rc >= 0)
		rc = namespace_update_uuid(nd_region, dev, uuid, ns_uuid);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	else
		kfree(uuid);
	dev_dbg(dev, "result: %zd wrote: %s%s", rc, buf,
			buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}
static DEVICE_ATTR_RW(uuid);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct resource *res;

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		res = &nspm->nsio.res;
	} else if (is_namespace_io(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(dev);

		res = &nsio->res;
	} else
		return -ENXIO;

	/* no address to convey if the namespace has no allocation */
	if (resource_size(res) == 0)
		return -ENXIO;
	return sprintf(buf, "%#llx\n", (unsigned long long) res->start);
}
static DEVICE_ATTR_ADMIN_RO(resource);

static const unsigned long blk_lbasize_supported[] = { 512, 520, 528,
	4096, 4104, 4160, 4224, 0 };

static const unsigned long pmem_lbasize_supported[] = { 512, 4096, 0 };

static ssize_t sector_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		return nd_size_select_show(nsblk->lbasize,
				blk_lbasize_supported, buf);
	}

	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		return nd_size_select_show(nspm->lbasize,
				pmem_lbasize_supported, buf);
	}
	return -ENXIO;
}

static ssize_t sector_size_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	const unsigned long *supported;
	unsigned long *lbasize;
	ssize_t rc = 0;

	if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		lbasize = &nsblk->lbasize;
		supported = blk_lbasize_supported;
	} else if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		lbasize = &nspm->lbasize;
		supported = pmem_lbasize_supported;
	} else
		return -ENXIO;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	if (to_ndns(dev)->claim)
		rc = -EBUSY;
	if (rc >= 0)
		rc = nd_size_select_store(dev, buf, lbasize, supported);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "result: %zd %s: %s%s", rc, rc < 0 ? "tried" : "wrote",
			buf, buf[len - 1] == '\n' ? "" : "\n");
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc ? rc : len;
}
static DEVICE_ATTR_RW(sector_size);

static ssize_t dpa_extents_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	struct nd_label_id label_id;
	int count = 0, i;
	u8 *uuid = NULL;
	u32 flags = 0;

	nvdimm_bus_lock(dev);
	if (is_namespace_pmem(dev)) {
		struct nd_namespace_pmem *nspm = to_nd_namespace_pmem(dev);

		uuid = nspm->uuid;
		flags = 0;
	} else if (is_namespace_blk(dev)) {
		struct nd_namespace_blk *nsblk = to_nd_namespace_blk(dev);

		uuid = nsblk->uuid;
		flags = NSLABEL_FLAG_LOCAL;
	}

	if (!uuid)
		goto out;

	nd_label_gen_id(&label_id, uuid, flags);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;

		for_each_dpa_resource(ndd, res)
			if (strcmp(res->name, label_id.id) == 0)
				count++;
	}
 out:
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%d\n", count);
}
static DEVICE_ATTR_RO(dpa_extents);

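/*
 * Pick a BTT format version from the label versions found on the
 * region's dimms: v1.1 labels (or label-less dimms) imply BTT1.1,
 * v1.2 labels (or uninitialized label areas) imply BTT2.0. See the
 * loop_bitmask explanation below.
 */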
static int btt_claim_class(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int i, loop_bitmask = 0;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_namespace_index *nsindex;

		/*
		 * If any of the DIMMs do not support labels the only
		 * possible BTT format is v1.
		 */
		if (!ndd) {
			loop_bitmask = 0;
			break;
		}

		nsindex = to_namespace_index(ndd, ndd->ns_current);
		if (nsindex == NULL)
			loop_bitmask |= 1;
		else {
			/* check whether existing labels are v1.1 or v1.2 */
			if (__le16_to_cpu(nsindex->major) == 1
					&& __le16_to_cpu(nsindex->minor) == 1)
				loop_bitmask |= 2;
			else
				loop_bitmask |= 4;
		}
	}
	/*
	 * If nsindex is null loop_bitmask's bit 0 will be set, and if an index
	 * block is found, a v1.1 label for any mapping will set bit 1, and a
	 * v1.2 label will set bit 2.
	 *
	 * At the end of the loop, at most one of the three bits must be set.
	 * If multiple bits were set, it means the different mappings disagree
	 * about their labels, and this must be cleaned up first.
	 *
	 * If all the label index blocks are found to agree, an nsindex of NULL
	 * implies labels haven't been initialized yet, and when they are they
	 * will be of the v1.2 format, so we can assume BTT2.0.
	 *
	 * If v1.1 labels are found, we enforce BTT1.1, and if v1.2 labels are
	 * found, we enforce BTT2.0.
	 *
	 * If the loop was never entered, default to BTT1.1 (legacy namespaces).
	 */
	switch (loop_bitmask) {
	case 0:
	case 2:
		return NVDIMM_CCLASS_BTT;
	case 1:
	case 4:
		return NVDIMM_CCLASS_BTT2;
	default:
		return -ENXIO;
	}
}

static ssize_t holder_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	nd_device_lock(dev);
	rc = sprintf(buf, "%s\n", ndns->claim ? dev_name(ndns->claim) : "");
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(holder);

static int __holder_class_store(struct device *dev, const char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);

	if (dev->driver || ndns->claim)
		return -EBUSY;

	if (sysfs_streq(buf, "btt")) {
		int rc = btt_claim_class(dev);

		if (rc < NVDIMM_CCLASS_NONE)
			return rc;
		ndns->claim_class = rc;
	} else if (sysfs_streq(buf, "pfn"))
		ndns->claim_class = NVDIMM_CCLASS_PFN;
	else if (sysfs_streq(buf, "dax"))
		ndns->claim_class = NVDIMM_CCLASS_DAX;
	else if (sysfs_streq(buf, ""))
		ndns->claim_class = NVDIMM_CCLASS_NONE;
	else
		return -EINVAL;

	return 0;
}

static ssize_t holder_class_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	struct nd_region *nd_region = to_nd_region(dev->parent);
	int rc;

	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = __holder_class_store(dev, buf);
	if (rc >= 0)
		rc = nd_namespace_label_update(nd_region, dev);
	dev_dbg(dev, "%s(%d)\n", rc < 0 ? "fail " : "", rc);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc < 0 ? rc : len;
}

static ssize_t holder_class_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	ssize_t rc;

	nd_device_lock(dev);
	if (ndns->claim_class == NVDIMM_CCLASS_NONE)
		rc = sprintf(buf, "\n");
	else if ((ndns->claim_class == NVDIMM_CCLASS_BTT) ||
			(ndns->claim_class == NVDIMM_CCLASS_BTT2))
		rc = sprintf(buf, "btt\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_PFN)
		rc = sprintf(buf, "pfn\n");
	else if (ndns->claim_class == NVDIMM_CCLASS_DAX)
		rc = sprintf(buf, "dax\n");
	else
		rc = sprintf(buf, "<unknown>\n");
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(holder_class);

static ssize_t mode_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_namespace_common *ndns = to_ndns(dev);
	struct device *claim;
	char *mode;
	ssize_t rc;

	nd_device_lock(dev);
	claim = ndns->claim;
	if (claim && is_nd_btt(claim))
		mode = "safe";
	else if (claim && is_nd_pfn(claim))
		mode = "memory";
	else if (claim && is_nd_dax(claim))
		mode = "dax";
	else if (!claim && pmem_should_map_pages(dev))
		mode = "memory";
	else
		mode = "raw";
	rc = sprintf(buf, "%s\n", mode);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(mode);

static ssize_t force_raw_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool force_raw;
	int rc = strtobool(buf, &force_raw);

	if (rc)
		return rc;

	to_ndns(dev)->force_raw = force_raw;
	return len;
}

static ssize_t force_raw_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	return sprintf(buf, "%d\n", to_ndns(dev)->force_raw);
}
static DEVICE_ATTR_RW(force_raw);

static struct attribute *nd_namespace_attributes[] = {
	&dev_attr_nstype.attr,
	&dev_attr_size.attr,
	&dev_attr_mode.attr,
	&dev_attr_uuid.attr,
	&dev_attr_holder.attr,
	&dev_attr_resource.attr,
	&dev_attr_alt_name.attr,
	&dev_attr_force_raw.attr,
	&dev_attr_sector_size.attr,
	&dev_attr_dpa_extents.attr,
	&dev_attr_holder_class.attr,
	NULL,
};

static umode_t namespace_visible(struct kobject *kobj,
		struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);

	if (a == &dev_attr_resource.attr && is_namespace_blk(dev))
		return 0;

	if (is_namespace_pmem(dev) || is_namespace_blk(dev)) {
		if (a == &dev_attr_size.attr)
			return 0644;

		return a->mode;
	}

	if (a == &dev_attr_nstype.attr || a == &dev_attr_size.attr
			|| a == &dev_attr_holder.attr
			|| a == &dev_attr_holder_class.attr
			|| a == &dev_attr_force_raw.attr
			|| a == &dev_attr_mode.attr)
		return a->mode;

	return 0;
}

static struct attribute_group nd_namespace_attribute_group = {
	.attrs = nd_namespace_attributes,
	.is_visible = namespace_visible,
};

static const struct attribute_group *nd_namespace_attribute_groups[] = {
	&nd_device_attribute_group,
	&nd_namespace_attribute_group,
	&nd_numa_attribute_group,
	NULL,
};

static const struct device_type namespace_io_device_type = {
	.name = "nd_namespace_io",
	.release = namespace_io_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_pmem_device_type = {
	.name = "nd_namespace_pmem",
	.release = namespace_pmem_release,
	.groups = nd_namespace_attribute_groups,
};

static const struct device_type namespace_blk_device_type = {
	.name = "nd_namespace_blk",
	.release = namespace_blk_release,
	.groups = nd_namespace_attribute_groups,
};

static bool is_namespace_pmem(const struct device *dev)
{
	return dev ? dev->type == &namespace_pmem_device_type : false;
}

static bool is_namespace_blk(const struct device *dev)
{
	return dev ? dev->type == &namespace_blk_device_type : false;
}

static bool is_namespace_io(const struct device *dev)
{
	return dev ? dev->type == &namespace_io_device_type : false;
}

struct nd_namespace_common *nvdimm_namespace_common_probe(struct device *dev)
{
	struct nd_btt *nd_btt = is_nd_btt(dev) ? to_nd_btt(dev) : NULL;
	struct nd_pfn *nd_pfn = is_nd_pfn(dev) ? to_nd_pfn(dev) : NULL;
	struct nd_dax *nd_dax = is_nd_dax(dev) ? to_nd_dax(dev) : NULL;
	struct nd_namespace_common *ndns = NULL;
	resource_size_t size;

	if (nd_btt || nd_pfn || nd_dax) {
		if (nd_btt)
			ndns = nd_btt->ndns;
		else if (nd_pfn)
			ndns = nd_pfn->ndns;
		else if (nd_dax)
			ndns = nd_dax->nd_pfn.ndns;

		if (!ndns)
			return ERR_PTR(-ENODEV);

		/*
		 * Flush any in-progress probes / removals in the driver
		 * for the raw personality of this namespace.
		 */
		nd_device_lock(&ndns->dev);
		nd_device_unlock(&ndns->dev);
		if (ndns->dev.driver) {
			dev_dbg(&ndns->dev, "is active, can't bind %s\n",
					dev_name(dev));
			return ERR_PTR(-EBUSY);
		}
		if (dev_WARN_ONCE(&ndns->dev, ndns->claim != dev,
					"host (%s) vs claim (%s) mismatch\n",
					dev_name(dev),
					dev_name(ndns->claim)))
			return ERR_PTR(-ENXIO);
	} else {
		ndns = to_ndns(dev);
		if (ndns->claim) {
			dev_dbg(dev, "claimed by %s, failing probe\n",
					dev_name(ndns->claim));

			return ERR_PTR(-ENXIO);
		}
	}

	if (nvdimm_namespace_locked(ndns))
		return ERR_PTR(-EACCES);

	size = nvdimm_namespace_capacity(ndns);
	if (size < ND_MIN_NAMESPACE_SIZE) {
		dev_dbg(&ndns->dev, "%pa, too small, must be at least %#x\n",
				&size, ND_MIN_NAMESPACE_SIZE);
		return ERR_PTR(-ENODEV);
	}

	/*
	 * Note, alignment validation for fsdax and devdax mode
	 * namespaces happens in nd_pfn_validate() where infoblock
	 * padding parameters can be applied.
	 */
	if (pmem_should_map_pages(dev)) {
		struct nd_namespace_io *nsio = to_nd_namespace_io(&ndns->dev);
		struct resource *res = &nsio->res;

		if (!IS_ALIGNED(res->start | (res->end + 1),
					memremap_compat_align())) {
			dev_err(&ndns->dev, "%pr misaligned, unable to map\n", res);
			return ERR_PTR(-EOPNOTSUPP);
		}
	}

	if (is_namespace_pmem(&ndns->dev)) {
		struct nd_namespace_pmem *nspm;

		nspm = to_nd_namespace_pmem(&ndns->dev);
		if (uuid_not_set(nspm->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
	} else if (is_namespace_blk(&ndns->dev)) {
		struct nd_namespace_blk *nsblk;

		nsblk = to_nd_namespace_blk(&ndns->dev);
		if (uuid_not_set(nsblk->uuid, &ndns->dev, __func__))
			return ERR_PTR(-ENODEV);
		if (!nsblk->lbasize) {
			dev_dbg(&ndns->dev, "sector size not set\n");
			return ERR_PTR(-ENODEV);
		}
		if (!nd_namespace_blk_validate(nsblk))
			return ERR_PTR(-ENODEV);
	}

	return ndns;
}
EXPORT_SYMBOL(nvdimm_namespace_common_probe);

int devm_namespace_enable(struct device *dev, struct nd_namespace_common *ndns,
		resource_size_t size)
{
	if (is_namespace_blk(&ndns->dev))
		return 0;
	return devm_nsio_enable(dev, to_nd_namespace_io(&ndns->dev), size);
}
EXPORT_SYMBOL_GPL(devm_namespace_enable);

void devm_namespace_disable(struct device *dev, struct nd_namespace_common *ndns)
{
	if (is_namespace_blk(&ndns->dev))
		return;
	devm_nsio_disable(dev, to_nd_namespace_io(&ndns->dev));
}
EXPORT_SYMBOL_GPL(devm_namespace_disable);

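/*
 * Build the single io-namespace that spans an entire label-less
 * region; the returned device array is NULL-terminated for the
 * caller (kcalloc of 2 entries).
 */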
static struct device **create_namespace_io(struct nd_region *nd_region)
{
	struct nd_namespace_io *nsio;
	struct device *dev, **devs;
	struct resource *res;

	nsio = kzalloc(sizeof(*nsio), GFP_KERNEL);
	if (!nsio)
		return NULL;

	devs = kcalloc(2, sizeof(struct device *), GFP_KERNEL);
	if (!devs) {
		kfree(nsio);
		return NULL;
	}

	dev = &nsio->common.dev;
	dev->type = &namespace_io_device_type;
	dev->parent = &nd_region->dev;
	res = &nsio->res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;
	res->start = nd_region->ndr_start;
	res->end = res->start + nd_region->ndr_size - 1;

	devs[0] = dev;
	return devs;
}

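/*
 * Scan every mapping in the region for a label that matches @uuid,
 * the interleave-set @cookie, and interleave position @pos; duplicate
 * uuid entries within a mapping disqualify the set.
 */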
static bool has_uuid_at_pos(struct nd_region *nd_region, u8 *uuid,
		u64 cookie, u16 pos)
{
	struct nd_namespace_label *found = NULL;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nd_interleave_set *nd_set = nd_region->nd_set;
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nd_label_ent *label_ent;
		bool found_uuid = false;

		list_for_each_entry(label_ent, &nd_mapping->labels, list) {
			struct nd_namespace_label *nd_label = label_ent->label;
			u16 position, nlabel;
			u64 isetcookie;

			if (!nd_label)
				continue;
			isetcookie = __le64_to_cpu(nd_label->isetcookie);
			position = __le16_to_cpu(nd_label->position);
			nlabel = __le16_to_cpu(nd_label->nlabel);

			if (isetcookie != cookie)
				continue;

			if (memcmp(nd_label->uuid, uuid, NSLABEL_UUID_LEN) != 0)
				continue;

			if (namespace_label_has(ndd, type_guid)
					&& !guid_equal(&nd_set->type_guid,
						&nd_label->type_guid)) {
				dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
						&nd_set->type_guid,
						&nd_label->type_guid);
				continue;
			}

			if (found_uuid) {
				dev_dbg(ndd->dev, "duplicate entry for uuid\n");
				return false;
			}
			found_uuid = true;
			if (nlabel != nd_region->ndr_mappings)
				continue;
			if (position != pos)
				continue;
			found = nd_label;
			break;
		}
		if (found)
			break;
	}
	return found != NULL;
}

select_pmem_id(struct nd_region * nd_region,u8 * pmem_id)1891 static int select_pmem_id(struct nd_region *nd_region, u8 *pmem_id)
1892 {
1893 int i;
1894
1895 if (!pmem_id)
1896 return -ENODEV;
1897
1898 for (i = 0; i < nd_region->ndr_mappings; i++) {
1899 struct nd_mapping *nd_mapping = &nd_region->mapping[i];
1900 struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
1901 struct nd_namespace_label *nd_label = NULL;
1902 u64 hw_start, hw_end, pmem_start, pmem_end;
1903 struct nd_label_ent *label_ent;
1904
1905 lockdep_assert_held(&nd_mapping->lock);
1906 list_for_each_entry(label_ent, &nd_mapping->labels, list) {
1907 nd_label = label_ent->label;
1908 if (!nd_label)
1909 continue;
1910 if (memcmp(nd_label->uuid, pmem_id, NSLABEL_UUID_LEN) == 0)
1911 break;
1912 nd_label = NULL;
1913 }
1914
1915 if (!nd_label) {
1916 WARN_ON(1);
1917 return -EINVAL;
1918 }
1919
1920 /*
1921 * Check that this label is compliant with the dpa
1922 * range published in NFIT
1923 */
1924 hw_start = nd_mapping->start;
1925 hw_end = hw_start + nd_mapping->size;
1926 pmem_start = __le64_to_cpu(nd_label->dpa);
1927 pmem_end = pmem_start + __le64_to_cpu(nd_label->rawsize);
1928 if (pmem_start >= hw_start && pmem_start < hw_end
1929 && pmem_end <= hw_end && pmem_end > hw_start)
1930 /* pass */;
1931 else {
1932 dev_dbg(&nd_region->dev, "%s invalid label for %pUb\n",
1933 dev_name(ndd->dev), nd_label->uuid);
1934 return -EINVAL;
1935 }
1936
1937 /* move recently validated label to the front of the list */
1938 list_move(&label_ent->list, &nd_mapping->labels);
1939 }
1940 return 0;
1941 }
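
/*
 * Illustrative sketch (hypothetical helper, not used by the driver):
 * the label-vs-NFIT containment rule applied in select_pmem_id() above,
 * restated as a standalone predicate. The label's [dpa, dpa + rawsize)
 * extent must fall entirely within the mapping's published
 * [start, start + size) range.
 */
static bool __maybe_unused example_label_in_mapping(u64 hw_start, u64 hw_size,
		u64 dpa, u64 rawsize)
{
	u64 hw_end = hw_start + hw_size;
	u64 pmem_end = dpa + rawsize;

	return dpa >= hw_start && dpa < hw_end
		&& pmem_end <= hw_end && pmem_end > hw_start;
}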

/**
 * create_namespace_pmem - validate interleave set labelling, retrieve label0
 * @nd_region: region with mappings to validate
 * @nsindex: namespace label index block with the current label space
 * @nd_label: target pmem namespace label to evaluate
 */
static struct device *create_namespace_pmem(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex,
		struct nd_namespace_label *nd_label)
{
	u64 cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	u64 altcookie = nd_region_interleave_set_altcookie(nd_region);
	struct nd_label_ent *label_ent;
	struct nd_namespace_pmem *nspm;
	struct nd_mapping *nd_mapping;
	resource_size_t size = 0;
	struct resource *res;
	struct device *dev;
	int rc = 0;
	u16 i;

	if (cookie == 0) {
		dev_dbg(&nd_region->dev, "invalid interleave-set-cookie\n");
		return ERR_PTR(-ENXIO);
	}

	if (__le64_to_cpu(nd_label->isetcookie) != cookie) {
		dev_dbg(&nd_region->dev, "invalid cookie in label: %pUb\n",
				nd_label->uuid);
		if (__le64_to_cpu(nd_label->isetcookie) != altcookie)
			return ERR_PTR(-EAGAIN);

		dev_dbg(&nd_region->dev, "valid altcookie in label: %pUb\n",
				nd_label->uuid);
	}

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return ERR_PTR(-ENOMEM);

	nspm->id = -1;
	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		if (has_uuid_at_pos(nd_region, nd_label->uuid, cookie, i))
			continue;
		if (has_uuid_at_pos(nd_region, nd_label->uuid, altcookie, i))
			continue;
		break;
	}

	if (i < nd_region->ndr_mappings) {
		struct nvdimm *nvdimm = nd_region->mapping[i].nvdimm;

		/*
		 * Give up if we don't find an instance of a uuid at each
		 * position (from 0 to nd_region->ndr_mappings - 1), or if we
		 * find a dimm with two instances of the same uuid.
		 */
		dev_err(&nd_region->dev, "%s missing label for %pUb\n",
				nvdimm_name(nvdimm), nd_label->uuid);
		rc = -EINVAL;
		goto err;
	}

	/*
	 * Fix up each mapping's 'labels' to have the validated pmem label for
	 * that position at labels[0], and NULL at labels[1]. In the process,
	 * check that the namespace aligns with the interleave set. We know
	 * that it does not overlap with any blk namespaces by virtue of the
	 * dimm being enabled (i.e. nd_label_reserve_dpa() succeeded).
	 */
	rc = select_pmem_id(nd_region, nd_label->uuid);
	if (rc)
		goto err;

	/* Calculate total size and populate namespace properties from label0 */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_namespace_label *label0;
		struct nvdimm_drvdata *ndd;

		nd_mapping = &nd_region->mapping[i];
		label_ent = list_first_entry_or_null(&nd_mapping->labels,
				typeof(*label_ent), list);
		label0 = label_ent ? label_ent->label : NULL;

		if (!label0) {
			WARN_ON(1);
			continue;
		}

		size += __le64_to_cpu(label0->rawsize);
		if (__le16_to_cpu(label0->position) != 0)
			continue;
		WARN_ON(nspm->alt_name || nspm->uuid);
		nspm->alt_name = kmemdup((void __force *) label0->name,
				NSLABEL_NAME_LEN, GFP_KERNEL);
		nspm->uuid = kmemdup((void __force *) label0->uuid,
				NSLABEL_UUID_LEN, GFP_KERNEL);
		nspm->lbasize = __le64_to_cpu(label0->lbasize);
		ndd = to_ndd(nd_mapping);
		if (namespace_label_has(ndd, abstraction_guid))
			nspm->nsio.common.claim_class
				= to_nvdimm_cclass(&label0->abstraction_guid);
	}

	if (!nspm->alt_name || !nspm->uuid) {
		rc = -ENOMEM;
		goto err;
	}

	nd_namespace_pmem_set_resource(nd_region, nspm, size);

	return dev;
err:
	namespace_pmem_release(dev);
	switch (rc) {
	case -EINVAL:
		dev_dbg(&nd_region->dev, "invalid label(s)\n");
		break;
	case -ENODEV:
		dev_dbg(&nd_region->dev, "label not found\n");
		break;
	default:
		dev_dbg(&nd_region->dev, "unexpected err: %d\n", rc);
		break;
	}
	return ERR_PTR(rc);
}

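/*
 * nsblk_add_resource() grows the namespace's res[] pointer array by one
 * slot, then points the new slot at the dimm resource that was
 * previously reserved at @start for this namespace's label_id. It
 * returns NULL on allocation failure or if no matching reserved extent
 * is found.
 */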
struct resource *nsblk_add_resource(struct nd_region *nd_region,
		struct nvdimm_drvdata *ndd, struct nd_namespace_blk *nsblk,
		resource_size_t start)
{
	struct nd_label_id label_id;
	struct resource *res;

	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);
	res = krealloc(nsblk->res,
			sizeof(void *) * (nsblk->num_resources + 1),
			GFP_KERNEL);
	if (!res)
		return NULL;
	nsblk->res = (struct resource **) res;
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0
				&& res->start == start) {
			nsblk->res[nsblk->num_resources++] = res;
			return res;
		}
	return NULL;
}

static struct device *nd_namespace_blk_create(struct nd_region *nd_region)
{
	struct nd_namespace_blk *nsblk;
	struct device *dev;

	if (!is_nd_blk(&nd_region->dev))
		return NULL;

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return NULL;

	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	nsblk->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nsblk->id < 0) {
		kfree(nsblk);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nsblk->id);
	dev->parent = &nd_region->dev;

	return &nsblk->common.dev;
}

static struct device *nd_namespace_pmem_create(struct nd_region *nd_region)
{
	struct nd_namespace_pmem *nspm;
	struct resource *res;
	struct device *dev;

	if (!is_memory(&nd_region->dev))
		return NULL;

	nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
	if (!nspm)
		return NULL;

	dev = &nspm->nsio.common.dev;
	dev->type = &namespace_pmem_device_type;
	dev->parent = &nd_region->dev;
	res = &nspm->nsio.res;
	res->name = dev_name(&nd_region->dev);
	res->flags = IORESOURCE_MEM;

	nspm->id = ida_simple_get(&nd_region->ns_ida, 0, 0, GFP_KERNEL);
	if (nspm->id < 0) {
		kfree(nspm);
		return NULL;
	}
	dev_set_name(dev, "namespace%d.%d", nd_region->id, nspm->id);
	nd_namespace_pmem_set_resource(nd_region, nspm, 0);

	return dev;
}

void nd_region_create_ns_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

	if (nd_region_to_nstype(nd_region) == ND_DEVICE_NAMESPACE_IO)
		return;

	if (is_nd_blk(&nd_region->dev))
		nd_region->ns_seed = nd_namespace_blk_create(nd_region);
	else
		nd_region->ns_seed = nd_namespace_pmem_create(nd_region);

	/*
	 * Seed creation failures are not fatal; provisioning is simply
	 * disabled until memory becomes available.
	 */
	if (!nd_region->ns_seed)
		dev_err(&nd_region->dev, "failed to create %s namespace\n",
				is_nd_blk(&nd_region->dev) ? "blk" : "pmem");
	else
		nd_device_register(nd_region->ns_seed);
}

void nd_region_create_dax_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->dax_seed = nd_dax_create(nd_region);
	/*
	 * Seed creation failures are not fatal; provisioning is simply
	 * disabled until memory becomes available.
	 */
	if (!nd_region->dax_seed)
		dev_err(&nd_region->dev, "failed to create dax namespace\n");
}

void nd_region_create_pfn_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->pfn_seed = nd_pfn_create(nd_region);
	/*
	 * Seed creation failures are not fatal; provisioning is simply
	 * disabled until memory becomes available.
	 */
	if (!nd_region->pfn_seed)
		dev_err(&nd_region->dev, "failed to create pfn namespace\n");
}

void nd_region_create_btt_seed(struct nd_region *nd_region)
{
	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	nd_region->btt_seed = nd_btt_create(nd_region);
	/*
	 * Seed creation failures are not fatal; provisioning is simply
	 * disabled until memory becomes available.
	 */
	if (!nd_region->btt_seed)
		dev_err(&nd_region->dev, "failed to create btt namespace\n");
}

static int add_namespace_resource(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, struct device **devs,
		int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	int i;

	for (i = 0; i < count; i++) {
		u8 *uuid = namespace_to_uuid(devs[i]);
		struct resource *res;

		if (IS_ERR_OR_NULL(uuid)) {
			WARN_ON(1);
			continue;
		}

		if (memcmp(uuid, nd_label->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		if (is_namespace_blk(devs[i])) {
			res = nsblk_add_resource(nd_region, ndd,
					to_nd_namespace_blk(devs[i]),
					__le64_to_cpu(nd_label->dpa));
			if (!res)
				return -ENXIO;
			nd_dbg_dpa(nd_region, ndd, res, "%d assign\n", count);
		} else {
			dev_err(&nd_region->dev,
					"error: conflicting extents for uuid: %pUb\n",
					nd_label->uuid);
			return -ENXIO;
		}
		break;
	}

	return i;
}
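
/*
 * Return contract for add_namespace_resource(), as consumed by
 * scan_labels(): a negative value aborts the scan, i < count means
 * @nd_label matched the existing namespace at devs[i], and a return of
 * count means no match was found, so a new namespace device needs to
 * be created for this label.
 */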

static struct device *create_namespace_blk(struct nd_region *nd_region,
		struct nd_namespace_label *nd_label, int count)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_blk *nsblk;
	char name[NSLABEL_NAME_LEN];
	struct device *dev = NULL;
	struct resource *res;

	if (namespace_label_has(ndd, type_guid)) {
		if (!guid_equal(&nd_set->type_guid, &nd_label->type_guid)) {
			dev_dbg(ndd->dev, "expect type_guid %pUb got %pUb\n",
					&nd_set->type_guid,
					&nd_label->type_guid);
			return ERR_PTR(-EAGAIN);
		}

		if (nd_label->isetcookie != __cpu_to_le64(nd_set->cookie2)) {
			dev_dbg(ndd->dev, "expect cookie %#llx got %#llx\n",
					nd_set->cookie2,
					__le64_to_cpu(nd_label->isetcookie));
			return ERR_PTR(-EAGAIN);
		}
	}

	nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
	if (!nsblk)
		return ERR_PTR(-ENOMEM);
	dev = &nsblk->common.dev;
	dev->type = &namespace_blk_device_type;
	dev->parent = &nd_region->dev;
	nsblk->id = -1;
	nsblk->lbasize = __le64_to_cpu(nd_label->lbasize);
	nsblk->uuid = kmemdup(nd_label->uuid, NSLABEL_UUID_LEN,
			GFP_KERNEL);
	if (namespace_label_has(ndd, abstraction_guid))
		nsblk->common.claim_class
			= to_nvdimm_cclass(&nd_label->abstraction_guid);
	if (!nsblk->uuid)
		goto blk_err;
	memcpy(name, nd_label->name, NSLABEL_NAME_LEN);
	if (name[0]) {
		nsblk->alt_name = kmemdup(name, NSLABEL_NAME_LEN,
				GFP_KERNEL);
		if (!nsblk->alt_name)
			goto blk_err;
	}
	res = nsblk_add_resource(nd_region, ndd, nsblk,
			__le64_to_cpu(nd_label->dpa));
	if (!res)
		goto blk_err;
	nd_dbg_dpa(nd_region, ndd, res, "%d: assign\n", count);
	return dev;
blk_err:
	namespace_blk_release(dev);
	return ERR_PTR(-ENXIO);
}

static int cmp_dpa(const void *a, const void *b)
{
	const struct device *dev_a = *(const struct device **) a;
	const struct device *dev_b = *(const struct device **) b;
	struct nd_namespace_blk *nsblk_a, *nsblk_b;
	struct nd_namespace_pmem *nspm_a, *nspm_b;

	if (is_namespace_io(dev_a))
		return 0;

	if (is_namespace_blk(dev_a)) {
		nsblk_a = to_nd_namespace_blk(dev_a);
		nsblk_b = to_nd_namespace_blk(dev_b);

		return memcmp(&nsblk_a->res[0]->start, &nsblk_b->res[0]->start,
				sizeof(resource_size_t));
	}

	nspm_a = to_nd_namespace_pmem(dev_a);
	nspm_b = to_nd_namespace_pmem(dev_b);

	return memcmp(&nspm_a->nsio.res.start, &nspm_b->nsio.res.start,
			sizeof(resource_size_t));
}
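
/*
 * Aside: cmp_dpa() compares the raw bytes of the start addresses, so on
 * a little-endian machine the resulting order is byte-wise rather than
 * numeric. A numeric three-way comparison would look like the sketch
 * below (hypothetical alternative, not used by the driver):
 */
static int __maybe_unused example_cmp_dpa_numeric(resource_size_t a,
		resource_size_t b)
{
	if (a < b)
		return -1;
	return a > b;
}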

static struct device **scan_labels(struct nd_region *nd_region)
{
	int i, count = 0;
	struct device *dev, **devs = NULL;
	struct nd_label_ent *label_ent, *e;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	resource_size_t map_end = nd_mapping->start + nd_mapping->size - 1;

	/* "safe" because create_namespace_pmem() might list_move() label_ent */
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;
		struct device **__devs;
		u32 flags;

		if (!nd_label)
			continue;
		flags = __le32_to_cpu(nd_label->flags);
		if (is_nd_blk(&nd_region->dev)
				== !!(flags & NSLABEL_FLAG_LOCAL))
			/* pass, region matches label type */;
		else
			continue;

		/* skip labels that describe extents outside of the region */
		if (__le64_to_cpu(nd_label->dpa) < nd_mapping->start ||
		    __le64_to_cpu(nd_label->dpa) > map_end)
			continue;

		i = add_namespace_resource(nd_region, nd_label, devs, count);
		if (i < 0)
			goto err;
		if (i < count)
			continue;
		__devs = kcalloc(count + 2, sizeof(dev), GFP_KERNEL);
		if (!__devs)
			goto err;
		memcpy(__devs, devs, sizeof(dev) * count);
		kfree(devs);
		devs = __devs;

		if (is_nd_blk(&nd_region->dev))
			dev = create_namespace_blk(nd_region, nd_label, count);
		else {
			struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			dev = create_namespace_pmem(nd_region, nsindex, nd_label);
		}

		if (IS_ERR(dev)) {
			switch (PTR_ERR(dev)) {
			case -EAGAIN:
				/* skip invalid labels */
				continue;
			case -ENODEV:
				/* fallthrough to seed creation */
				break;
			default:
				goto err;
			}
		} else
			devs[count++] = dev;
	}

	dev_dbg(&nd_region->dev, "discovered %d %s namespace%s\n",
			count, is_nd_blk(&nd_region->dev)
			? "blk" : "pmem", count == 1 ? "" : "s");

	if (count == 0) {
		/* Publish a zero-sized namespace for userspace to configure. */
		nd_mapping_free_labels(nd_mapping);

		devs = kcalloc(2, sizeof(dev), GFP_KERNEL);
		if (!devs)
			goto err;
		if (is_nd_blk(&nd_region->dev)) {
			struct nd_namespace_blk *nsblk;

			nsblk = kzalloc(sizeof(*nsblk), GFP_KERNEL);
			if (!nsblk)
				goto err;
			dev = &nsblk->common.dev;
			dev->type = &namespace_blk_device_type;
		} else {
			struct nd_namespace_pmem *nspm;

			nspm = kzalloc(sizeof(*nspm), GFP_KERNEL);
			if (!nspm)
				goto err;
			dev = &nspm->nsio.common.dev;
			dev->type = &namespace_pmem_device_type;
			nd_namespace_pmem_set_resource(nd_region, nspm, 0);
		}
		dev->parent = &nd_region->dev;
		devs[count++] = dev;
	} else if (is_memory(&nd_region->dev)) {
		/* clean unselected labels */
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct list_head *l, *e;
			LIST_HEAD(list);
			int j;

			nd_mapping = &nd_region->mapping[i];
			if (list_empty(&nd_mapping->labels)) {
				WARN_ON(1);
				continue;
			}

			j = count;
			list_for_each_safe(l, e, &nd_mapping->labels) {
				if (!j--)
					break;
				list_move_tail(l, &list);
			}
			nd_mapping_free_labels(nd_mapping);
			list_splice_init(&list, &nd_mapping->labels);
		}
	}

	if (count > 1)
		sort(devs, count, sizeof(struct device *), cmp_dpa, NULL);

	return devs;

err:
	if (devs) {
		for (i = 0; devs[i]; i++)
			if (is_nd_blk(&nd_region->dev))
				namespace_blk_release(devs[i]);
			else
				namespace_pmem_release(devs[i]);
		kfree(devs);
	}
	return NULL;
}

static struct device **create_namespaces(struct nd_region *nd_region)
{
	struct nd_mapping *nd_mapping;
	struct device **devs;
	int i;

	if (nd_region->ndr_mappings == 0)
		return NULL;

	/* lock down all mappings while we scan labels */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		mutex_lock_nested(&nd_mapping->lock, i);
	}

	devs = scan_labels(nd_region);

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		int reverse = nd_region->ndr_mappings - 1 - i;

		nd_mapping = &nd_region->mapping[reverse];
		mutex_unlock(&nd_mapping->lock);
	}

	return devs;
}
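
/*
 * Locking note: create_namespaces() always acquires the mapping locks
 * in index order, annotated with mutex_lock_nested(..., i) so lockdep
 * can model the per-index nesting, and releases them in reverse order.
 */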

static void deactivate_labels(void *region)
{
	struct nd_region *nd_region = region;
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = nd_mapping->ndd;
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		mutex_lock(&nd_mapping->lock);
		nd_mapping_free_labels(nd_mapping);
		mutex_unlock(&nd_mapping->lock);

		put_ndd(ndd);
		nd_mapping->ndd = NULL;
		if (ndd)
			atomic_dec(&nvdimm->busy);
	}
}

static int init_active_labels(struct nd_region *nd_region)
{
	int i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		struct nd_label_ent *label_ent;
		int count, j;

		/*
		 * If the dimm is disabled then we may need to prevent
		 * the region from being activated.
		 */
		if (!ndd) {
			if (test_bit(NDD_LOCKED, &nvdimm->flags))
				/* fail, label data may be unreadable */;
			else if (test_bit(NDD_LABELING, &nvdimm->flags))
				/* fail, labels needed to disambiguate dpa */;
			else
				return 0;

			dev_err(&nd_region->dev, "%s: is %s, failing probe\n",
					dev_name(&nd_mapping->nvdimm->dev),
					test_bit(NDD_LOCKED, &nvdimm->flags)
					? "locked" : "disabled");
			return -ENXIO;
		}
		nd_mapping->ndd = ndd;
		atomic_inc(&nvdimm->busy);
		get_ndd(ndd);

		count = nd_label_active_count(ndd);
		dev_dbg(ndd->dev, "count: %d\n", count);
		if (!count)
			continue;
		for (j = 0; j < count; j++) {
			struct nd_namespace_label *label;

			label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
			if (!label_ent)
				break;
			label = nd_label_active(ndd, j);
			if (test_bit(NDD_NOBLK, &nvdimm->flags)) {
				u32 flags = __le32_to_cpu(label->flags);

				flags &= ~NSLABEL_FLAG_LOCAL;
				label->flags = __cpu_to_le32(flags);
			}
			label_ent->label = label;

			mutex_lock(&nd_mapping->lock);
			list_add_tail(&label_ent->list, &nd_mapping->labels);
			mutex_unlock(&nd_mapping->lock);
		}

		if (j < count)
			break;
	}

	if (i < nd_region->ndr_mappings) {
		deactivate_labels(nd_region);
		return -ENOMEM;
	}

	return devm_add_action_or_reset(&nd_region->dev, deactivate_labels,
			nd_region);
}
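
/*
 * Teardown note: on success init_active_labels() registers
 * deactivate_labels() as a devm action, so the label references and
 * nvdimm->busy counts taken above are dropped automatically when the
 * region device is unbound (or immediately if registration fails).
 */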

int nd_region_register_namespaces(struct nd_region *nd_region, int *err)
{
	struct device **devs = NULL;
	int i, rc = 0, type;

	*err = 0;
	nvdimm_bus_lock(&nd_region->dev);
	rc = init_active_labels(nd_region);
	if (rc) {
		nvdimm_bus_unlock(&nd_region->dev);
		return rc;
	}

	type = nd_region_to_nstype(nd_region);
	switch (type) {
	case ND_DEVICE_NAMESPACE_IO:
		devs = create_namespace_io(nd_region);
		break;
	case ND_DEVICE_NAMESPACE_PMEM:
	case ND_DEVICE_NAMESPACE_BLK:
		devs = create_namespaces(nd_region);
		break;
	default:
		break;
	}
	nvdimm_bus_unlock(&nd_region->dev);

	if (!devs)
		return -ENODEV;

	for (i = 0; devs[i]; i++) {
		struct device *dev = devs[i];
		int id;

		if (type == ND_DEVICE_NAMESPACE_BLK) {
			struct nd_namespace_blk *nsblk;

			nsblk = to_nd_namespace_blk(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nsblk->id = id;
		} else if (type == ND_DEVICE_NAMESPACE_PMEM) {
			struct nd_namespace_pmem *nspm;

			nspm = to_nd_namespace_pmem(dev);
			id = ida_simple_get(&nd_region->ns_ida, 0, 0,
					GFP_KERNEL);
			nspm->id = id;
		} else
			id = i;

		if (id < 0)
			break;
		dev_set_name(dev, "namespace%d.%d", nd_region->id, id);
		nd_device_register(dev);
	}
	if (i)
		nd_region->ns_seed = devs[0];

	if (devs[i]) {
		int j;

		for (j = i; devs[j]; j++) {
			struct device *dev = devs[j];

			device_initialize(dev);
			put_device(dev);
		}
		*err = j - i;
		/*
		 * All of the namespaces we tried to register failed, so
		 * fail region activation.
		 */
		if (*err == 0)
			rc = -ENODEV;
	}
	kfree(devs);

	if (rc == -ENODEV)
		return rc;

	return i;
}