/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of version 2 of the GNU General Public License as
 * published by the Free Software Foundation.
 *
 * This program is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
 * General Public License for more details.
 */
#include <linux/scatterlist.h>
#include <linux/highmem.h>
#include <linux/sched.h>
#include <linux/slab.h>
#include <linux/hash.h>
#include <linux/sort.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "nd.h"

/*
 * For readq() and writeq() on 32-bit builds, the hi-lo, lo-hi order is
 * irrelevant.
 */
#include <linux/io-64-nonatomic-hi-lo.h>

static DEFINE_IDA(region_ida);
static DEFINE_PER_CPU(int, flush_idx);
static int nvdimm_map_flush(struct device *dev, struct nvdimm *nvdimm, int dimm,
		struct nd_region_data *ndrd)
{
	int i, j;

	dev_dbg(dev, "%s: map %d flush address%s\n", nvdimm_name(nvdimm),
			nvdimm->num_flush, nvdimm->num_flush == 1 ? "" : "es");
	for (i = 0; i < (1 << ndrd->hints_shift); i++) {
		struct resource *res = &nvdimm->flush_wpq[i];
		unsigned long pfn = PHYS_PFN(res->start);
		void __iomem *flush_page;

		/* check if flush hints share a page */
		for (j = 0; j < i; j++) {
			struct resource *res_j = &nvdimm->flush_wpq[j];
			unsigned long pfn_j = PHYS_PFN(res_j->start);

			if (pfn == pfn_j)
				break;
		}

		if (j < i)
			flush_page = (void __iomem *) ((unsigned long)
					ndrd_get_flush_wpq(ndrd, dimm, j)
					& PAGE_MASK);
		else
			flush_page = devm_nvdimm_ioremap(dev,
					PFN_PHYS(pfn), PAGE_SIZE);
		if (!flush_page)
			return -ENXIO;
		ndrd_set_flush_wpq(ndrd, dimm, i, flush_page
				+ (res->start & ~PAGE_MASK));
	}

	return 0;
}
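
/*
 * Illustrative note (not from the original source; the addresses are
 * hypothetical): if two hint resources land in the same 4K page, say
 * 0x10000008 and 0x10000010, the PFN comparison above matches and the
 * second slot reuses the first slot's ioremap'd page, differing only
 * in the sub-page offset added via (res->start & ~PAGE_MASK).
 */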

int nd_region_activate(struct nd_region *nd_region)
{
	int i, j, num_flush = 0;
	struct nd_region_data *ndrd;
	struct device *dev = &nd_region->dev;
	size_t flush_data_size = sizeof(void *);

	nvdimm_bus_lock(&nd_region->dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* at least one null hint slot per-dimm for the "no-hint" case */
		flush_data_size += sizeof(void *);
		num_flush = min_not_zero(num_flush, nvdimm->num_flush);
		if (!nvdimm->num_flush)
			continue;
		flush_data_size += nvdimm->num_flush * sizeof(void *);
	}
	nvdimm_bus_unlock(&nd_region->dev);

	ndrd = devm_kzalloc(dev, sizeof(*ndrd) + flush_data_size, GFP_KERNEL);
	if (!ndrd)
		return -ENOMEM;
	dev_set_drvdata(dev, ndrd);

	if (!num_flush)
		return 0;

	ndrd->hints_shift = ilog2(num_flush);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;
		int rc = nvdimm_map_flush(&nd_region->dev, nvdimm, i, ndrd);

		if (rc)
			return rc;
	}

	/*
	 * Clear out entries that are duplicates so that a hint shared
	 * between mappings is only flushed once.
	 */
	for (i = 0; i < nd_region->ndr_mappings - 1; i++) {
		/* ignore if NULL already */
		if (!ndrd_get_flush_wpq(ndrd, i, 0))
			continue;

		for (j = i + 1; j < nd_region->ndr_mappings; j++)
			if (ndrd_get_flush_wpq(ndrd, i, 0) ==
			    ndrd_get_flush_wpq(ndrd, j, 0))
				ndrd_set_flush_wpq(ndrd, j, 0, NULL);
	}

	return 0;
}
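
/*
 * Layout sketch (assumed from the ndrd_{get,set}_flush_wpq() helpers
 * declared in "nd.h"): with two mappings that each publish two flush
 * hints, hints_shift is 1 and the table is indexed as
 * flush_wpq[(dimm << hints_shift) + hint]:
 *
 *	dimm0: [ hint0 | hint1 ]
 *	dimm1: [ hint0 | hint1 ]
 */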

static void nd_region_release(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);
	u16 i;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		put_device(&nvdimm->dev);
	}
	free_percpu(nd_region->lane);
	ida_simple_remove(&region_ida, nd_region->id);
	if (is_nd_blk(dev))
		kfree(to_nd_blk_region(dev));
	else
		kfree(nd_region);
}

static struct device_type nd_blk_device_type = {
	.name = "nd_blk",
	.release = nd_region_release,
};

static struct device_type nd_pmem_device_type = {
	.name = "nd_pmem",
	.release = nd_region_release,
};

static struct device_type nd_volatile_device_type = {
	.name = "nd_volatile",
	.release = nd_region_release,
};

bool is_nd_pmem(struct device *dev)
{
	return dev ? dev->type == &nd_pmem_device_type : false;
}

bool is_nd_blk(struct device *dev)
{
	return dev ? dev->type == &nd_blk_device_type : false;
}

bool is_nd_volatile(struct device *dev)
{
	return dev ? dev->type == &nd_volatile_device_type : false;
}

struct nd_region *to_nd_region(struct device *dev)
{
	struct nd_region *nd_region = container_of(dev, struct nd_region, dev);

	WARN_ON(dev->type->release != nd_region_release);
	return nd_region;
}
EXPORT_SYMBOL_GPL(to_nd_region);

struct device *nd_region_dev(struct nd_region *nd_region)
{
	if (!nd_region)
		return NULL;
	return &nd_region->dev;
}
EXPORT_SYMBOL_GPL(nd_region_dev);

struct nd_blk_region *to_nd_blk_region(struct device *dev)
{
	struct nd_region *nd_region = to_nd_region(dev);

	WARN_ON(!is_nd_blk(dev));
	return container_of(nd_region, struct nd_blk_region, nd_region);
}
EXPORT_SYMBOL_GPL(to_nd_blk_region);

void *nd_region_provider_data(struct nd_region *nd_region)
{
	return nd_region->provider_data;
}
EXPORT_SYMBOL_GPL(nd_region_provider_data);

void *nd_blk_region_provider_data(struct nd_blk_region *ndbr)
{
	return ndbr->blk_provider_data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_provider_data);

void nd_blk_region_set_provider_data(struct nd_blk_region *ndbr, void *data)
{
	ndbr->blk_provider_data = data;
}
EXPORT_SYMBOL_GPL(nd_blk_region_set_provider_data);

/**
 * nd_region_to_nstype() - region to an integer namespace type
 * @nd_region: region-device to interrogate
 *
 * This is the 'nstype' attribute of a region as well, an input to the
 * MODALIAS for namespace devices, and bit number for a nvdimm_bus to match
 * namespace devices with namespace drivers.
 */
int nd_region_to_nstype(struct nd_region *nd_region)
{
	if (is_memory(&nd_region->dev)) {
		u16 i, alias;

		for (i = 0, alias = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			if (test_bit(NDD_ALIASING, &nvdimm->flags))
				alias++;
		}
		if (alias)
			return ND_DEVICE_NAMESPACE_PMEM;
		else
			return ND_DEVICE_NAMESPACE_IO;
	} else if (is_nd_blk(&nd_region->dev)) {
		return ND_DEVICE_NAMESPACE_BLK;
	}

	return 0;
}
EXPORT_SYMBOL(nd_region_to_nstype);

static ssize_t size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long size = 0;

	if (is_memory(dev)) {
		size = nd_region->ndr_size;
	} else if (nd_region->ndr_mappings == 1) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];

		size = nd_mapping->size;
	}

	return sprintf(buf, "%llu\n", size);
}
static DEVICE_ATTR_RO(size);

static ssize_t deep_flush_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	/*
	 * NOTE: in the nvdimm_has_flush() error case this attribute is
	 * not visible.
	 */
	return sprintf(buf, "%d\n", nvdimm_has_flush(nd_region));
}

static ssize_t deep_flush_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	bool flush;
	int rc = strtobool(buf, &flush);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;
	if (!flush)
		return -EINVAL;
	nvdimm_flush(nd_region);

	return len;
}
static DEVICE_ATTR_RW(deep_flush);

static ssize_t mappings_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ndr_mappings);
}
static DEVICE_ATTR_RO(mappings);

static ssize_t nstype_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region_to_nstype(nd_region));
}
static DEVICE_ATTR_RO(nstype);

static ssize_t set_cookie_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	ssize_t rc = 0;

	if (is_memory(dev) && nd_set)
		/* pass, should be precluded by region_visible */;
	else
		return -ENXIO;

	/*
	 * The cookie to show depends on which specification of the
	 * labels we are using. If there are no labels then default to
	 * the v1.1 namespace label cookie definition. To read all this
	 * data we need to wait for probing to settle.
	 */
	device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	if (nd_region->ndr_mappings) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[0];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		if (ndd) {
			struct nd_namespace_index *nsindex;

			nsindex = to_namespace_index(ndd, ndd->ns_current);
			rc = sprintf(buf, "%#llx\n",
					nd_region_interleave_set_cookie(nd_region,
							nsindex));
		}
	}
	nvdimm_bus_unlock(dev);
	device_unlock(dev);

	if (rc)
		return rc;
	return sprintf(buf, "%#llx\n", nd_set->cookie1);
}
static DEVICE_ATTR_RO(set_cookie);

resource_size_t nd_region_available_dpa(struct nd_region *nd_region)
{
	resource_size_t blk_max_overlap = 0, available, overlap;
	int i;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));

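	/*
	 * Restart the scan whenever a mapping reports a larger BLK
	 * overlap than assumed so far, so the final sum is computed
	 * against the worst-case overlap across all DIMMs.
	 */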
 retry:
	available = 0;
	overlap = blk_max_overlap;
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

		/* if a dimm is disabled the available capacity is zero */
		if (!ndd)
			return 0;

		if (is_memory(&nd_region->dev)) {
			available += nd_pmem_available_dpa(nd_region,
					nd_mapping, &overlap);
			if (overlap > blk_max_overlap) {
				blk_max_overlap = overlap;
				goto retry;
			}
		} else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}

	return available;
}

resource_size_t nd_region_allocatable_dpa(struct nd_region *nd_region)
{
	resource_size_t available = 0;
	int i;

	if (is_memory(&nd_region->dev))
		available = PHYS_ADDR_MAX;

	WARN_ON(!is_nvdimm_bus_locked(&nd_region->dev));
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		if (is_memory(&nd_region->dev))
			available = min(available,
					nd_pmem_max_contiguous_dpa(nd_region,
							nd_mapping));
		else if (is_nd_blk(&nd_region->dev))
			available += nd_blk_available_dpa(nd_region);
	}
	if (is_memory(&nd_region->dev))
		return available * nd_region->ndr_mappings;
	return available;
}

static ssize_t available_size_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	/*
	 * Flush in-flight updates and grab a snapshot of the available
	 * size. Of course, this value is potentially invalidated the
	 * moment nvdimm_bus_lock() is dropped, but that's userspace's
	 * problem to not race itself.
	 */
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_available_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(available_size);

static ssize_t max_available_extent_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	unsigned long long available = 0;

	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	available = nd_region_allocatable_dpa(nd_region);
	nvdimm_bus_unlock(dev);

	return sprintf(buf, "%llu\n", available);
}
static DEVICE_ATTR_RO(max_available_extent);

static ssize_t init_namespaces_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region_data *ndrd = dev_get_drvdata(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (ndrd)
		rc = sprintf(buf, "%d/%d\n", ndrd->ns_active, ndrd->ns_count);
	else
		rc = -ENXIO;
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(init_namespaces);

static ssize_t namespace_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->ns_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->ns_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(namespace_seed);

static ssize_t btt_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->btt_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->btt_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(btt_seed);

static ssize_t pfn_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->pfn_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->pfn_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(pfn_seed);

static ssize_t dax_seed_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);
	ssize_t rc;

	nvdimm_bus_lock(dev);
	if (nd_region->dax_seed)
		rc = sprintf(buf, "%s\n", dev_name(nd_region->dax_seed));
	else
		rc = sprintf(buf, "\n");
	nvdimm_bus_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RO(dax_seed);

static ssize_t read_only_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%d\n", nd_region->ro);
}

static ssize_t read_only_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	bool ro;
	int rc = strtobool(buf, &ro);
	struct nd_region *nd_region = to_nd_region(dev);

	if (rc)
		return rc;

	nd_region->ro = ro;
	return len;
}
static DEVICE_ATTR_RW(read_only);

static ssize_t region_badblocks_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return badblocks_show(&nd_region->bb, buf, 0);
}

static DEVICE_ATTR(badblocks, 0444, region_badblocks_show, NULL);

static ssize_t resource_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	return sprintf(buf, "%#llx\n", nd_region->ndr_start);
}
static DEVICE_ATTR_RO(resource);

static ssize_t persistence_domain_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nd_region *nd_region = to_nd_region(dev);

	if (test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags))
		return sprintf(buf, "cpu_cache\n");
	else if (test_bit(ND_REGION_PERSIST_MEMCTRL, &nd_region->flags))
		return sprintf(buf, "memory_controller\n");
	else
		return sprintf(buf, "\n");
}
static DEVICE_ATTR_RO(persistence_domain);

static struct attribute *nd_region_attributes[] = {
	&dev_attr_size.attr,
	&dev_attr_nstype.attr,
	&dev_attr_mappings.attr,
	&dev_attr_btt_seed.attr,
	&dev_attr_pfn_seed.attr,
	&dev_attr_dax_seed.attr,
	&dev_attr_deep_flush.attr,
	&dev_attr_read_only.attr,
	&dev_attr_set_cookie.attr,
	&dev_attr_available_size.attr,
	&dev_attr_max_available_extent.attr,
	&dev_attr_namespace_seed.attr,
	&dev_attr_init_namespaces.attr,
	&dev_attr_badblocks.attr,
	&dev_attr_resource.attr,
	&dev_attr_persistence_domain.attr,
	NULL,
};

static umode_t region_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	int type = nd_region_to_nstype(nd_region);

	if (!is_memory(dev) && a == &dev_attr_pfn_seed.attr)
		return 0;

	if (!is_memory(dev) && a == &dev_attr_dax_seed.attr)
		return 0;

	if (!is_nd_pmem(dev) && a == &dev_attr_badblocks.attr)
		return 0;

	if (a == &dev_attr_resource.attr) {
		if (is_nd_pmem(dev))
			return 0400;
		else
			return 0;
	}

	if (a == &dev_attr_deep_flush.attr) {
		int has_flush = nvdimm_has_flush(nd_region);

		if (has_flush == 1)
			return a->mode;
		else if (has_flush == 0)
			return 0444;
		else
			return 0;
	}

	if (a == &dev_attr_persistence_domain.attr) {
		if ((nd_region->flags & (BIT(ND_REGION_PERSIST_CACHE)
					| BIT(ND_REGION_PERSIST_MEMCTRL))) == 0)
			return 0;
		return a->mode;
	}

	if (a != &dev_attr_set_cookie.attr
			&& a != &dev_attr_available_size.attr)
		return a->mode;

	if ((type == ND_DEVICE_NAMESPACE_PMEM
				|| type == ND_DEVICE_NAMESPACE_BLK)
			&& a == &dev_attr_available_size.attr)
		return a->mode;
	else if (is_memory(dev) && nd_set)
		return a->mode;

	return 0;
}

struct attribute_group nd_region_attribute_group = {
	.attrs = nd_region_attributes,
	.is_visible = region_visible,
};
EXPORT_SYMBOL_GPL(nd_region_attribute_group);

u64 nd_region_interleave_set_cookie(struct nd_region *nd_region,
		struct nd_namespace_index *nsindex)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (!nd_set)
		return 0;

	if (nsindex && __le16_to_cpu(nsindex->major) == 1
			&& __le16_to_cpu(nsindex->minor) == 1)
		return nd_set->cookie1;
	return nd_set->cookie2;
}

u64 nd_region_interleave_set_altcookie(struct nd_region *nd_region)
{
	struct nd_interleave_set *nd_set = nd_region->nd_set;

	if (nd_set)
		return nd_set->altcookie;
	return 0;
}

void nd_mapping_free_labels(struct nd_mapping *nd_mapping)
{
	struct nd_label_ent *label_ent, *e;

	lockdep_assert_held(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		list_del(&label_ent->list);
		kfree(label_ent);
	}
}

/*
 * Upon successful probe/remove, take/release a reference on the
 * associated interleave set (if present), and plant new btt + namespace
 * seeds. Also, on the removal of a BLK region, notify the provider to
 * disable the region.
 */
static void nd_region_notify_driver_action(struct nvdimm_bus *nvdimm_bus,
		struct device *dev, bool probe)
{
	struct nd_region *nd_region;

	if (!probe && is_nd_region(dev)) {
		int i;

		nd_region = to_nd_region(dev);
		for (i = 0; i < nd_region->ndr_mappings; i++) {
			struct nd_mapping *nd_mapping = &nd_region->mapping[i];
			struct nvdimm_drvdata *ndd = nd_mapping->ndd;
			struct nvdimm *nvdimm = nd_mapping->nvdimm;

			mutex_lock(&nd_mapping->lock);
			nd_mapping_free_labels(nd_mapping);
			mutex_unlock(&nd_mapping->lock);

			put_ndd(ndd);
			nd_mapping->ndd = NULL;
			if (ndd)
				atomic_dec(&nvdimm->busy);
		}
	}
	if (dev->parent && is_nd_region(dev->parent) && probe) {
		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->ns_seed == dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_btt(dev) && probe) {
		struct nd_btt *nd_btt = to_nd_btt(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->btt_seed == dev)
			nd_region_create_btt_seed(nd_region);
		if (nd_region->ns_seed == &nd_btt->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_pfn(dev) && probe) {
		struct nd_pfn *nd_pfn = to_nd_pfn(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->pfn_seed == dev)
			nd_region_create_pfn_seed(nd_region);
		if (nd_region->ns_seed == &nd_pfn->ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
	if (is_nd_dax(dev) && probe) {
		struct nd_dax *nd_dax = to_nd_dax(dev);

		nd_region = to_nd_region(dev->parent);
		nvdimm_bus_lock(dev);
		if (nd_region->dax_seed == dev)
			nd_region_create_dax_seed(nd_region);
		if (nd_region->ns_seed == &nd_dax->nd_pfn.ndns->dev)
			nd_region_create_ns_seed(nd_region);
		nvdimm_bus_unlock(dev);
	}
}

void nd_region_probe_success(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, true);
}

void nd_region_disable(struct nvdimm_bus *nvdimm_bus, struct device *dev)
{
	nd_region_notify_driver_action(nvdimm_bus, dev, false);
}

static ssize_t mappingN(struct device *dev, char *buf, int n)
{
	struct nd_region *nd_region = to_nd_region(dev);
	struct nd_mapping *nd_mapping;
	struct nvdimm *nvdimm;

	if (n >= nd_region->ndr_mappings)
		return -ENXIO;
	nd_mapping = &nd_region->mapping[n];
	nvdimm = nd_mapping->nvdimm;

	return sprintf(buf, "%s,%llu,%llu,%d\n", dev_name(&nvdimm->dev),
			nd_mapping->start, nd_mapping->size,
			nd_mapping->position);
}
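
/*
 * Example sysfs output (hypothetical values): a mapping backed by
 * DIMM "nmem0" starting at DPA 268435456 with a 268435456 byte size
 * in interleave position 0 reads as:
 *
 *	nmem0,268435456,268435456,0
 */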

#define REGION_MAPPING(idx)					\
static ssize_t mapping##idx##_show(struct device *dev,		\
		struct device_attribute *attr, char *buf)	\
{								\
	return mappingN(dev, buf, idx);				\
}								\
static DEVICE_ATTR_RO(mapping##idx)

/*
 * 32 should be enough for a while; even in the presence of socket
 * interleave, a 32-way interleave set is a degenerate case.
 */
REGION_MAPPING(0);
REGION_MAPPING(1);
REGION_MAPPING(2);
REGION_MAPPING(3);
REGION_MAPPING(4);
REGION_MAPPING(5);
REGION_MAPPING(6);
REGION_MAPPING(7);
REGION_MAPPING(8);
REGION_MAPPING(9);
REGION_MAPPING(10);
REGION_MAPPING(11);
REGION_MAPPING(12);
REGION_MAPPING(13);
REGION_MAPPING(14);
REGION_MAPPING(15);
REGION_MAPPING(16);
REGION_MAPPING(17);
REGION_MAPPING(18);
REGION_MAPPING(19);
REGION_MAPPING(20);
REGION_MAPPING(21);
REGION_MAPPING(22);
REGION_MAPPING(23);
REGION_MAPPING(24);
REGION_MAPPING(25);
REGION_MAPPING(26);
REGION_MAPPING(27);
REGION_MAPPING(28);
REGION_MAPPING(29);
REGION_MAPPING(30);
REGION_MAPPING(31);

static umode_t mapping_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, struct device, kobj);
	struct nd_region *nd_region = to_nd_region(dev);

	if (n < nd_region->ndr_mappings)
		return a->mode;
	return 0;
}

static struct attribute *mapping_attributes[] = {
	&dev_attr_mapping0.attr,
	&dev_attr_mapping1.attr,
	&dev_attr_mapping2.attr,
	&dev_attr_mapping3.attr,
	&dev_attr_mapping4.attr,
	&dev_attr_mapping5.attr,
	&dev_attr_mapping6.attr,
	&dev_attr_mapping7.attr,
	&dev_attr_mapping8.attr,
	&dev_attr_mapping9.attr,
	&dev_attr_mapping10.attr,
	&dev_attr_mapping11.attr,
	&dev_attr_mapping12.attr,
	&dev_attr_mapping13.attr,
	&dev_attr_mapping14.attr,
	&dev_attr_mapping15.attr,
	&dev_attr_mapping16.attr,
	&dev_attr_mapping17.attr,
	&dev_attr_mapping18.attr,
	&dev_attr_mapping19.attr,
	&dev_attr_mapping20.attr,
	&dev_attr_mapping21.attr,
	&dev_attr_mapping22.attr,
	&dev_attr_mapping23.attr,
	&dev_attr_mapping24.attr,
	&dev_attr_mapping25.attr,
	&dev_attr_mapping26.attr,
	&dev_attr_mapping27.attr,
	&dev_attr_mapping28.attr,
	&dev_attr_mapping29.attr,
	&dev_attr_mapping30.attr,
	&dev_attr_mapping31.attr,
	NULL,
};

struct attribute_group nd_mapping_attribute_group = {
	.is_visible = mapping_visible,
	.attrs = mapping_attributes,
};
EXPORT_SYMBOL_GPL(nd_mapping_attribute_group);

int nd_blk_region_init(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);

	if (!is_nd_blk(dev))
		return 0;

	if (nd_region->ndr_mappings < 1) {
		dev_dbg(dev, "invalid BLK region\n");
		return -ENXIO;
	}

	return to_nd_blk_region(dev)->enable(nvdimm_bus, dev);
}

/**
 * nd_region_acquire_lane - allocate and lock a lane
 * @nd_region: region id and number of lanes possible
 *
 * A lane correlates to a BLK-data-window and/or a log slot in the BTT.
 * We optimize for the common case where there are 256 lanes, one
 * per-cpu. For larger systems we need to lock to share lanes. For now
 * this implementation assumes the cost of maintaining an allocator for
 * free lanes is on the order of the lock hold time, so it implements a
 * static lane = cpu % num_lanes mapping.
 *
 * In the case of a BTT instance on top of a BLK namespace a lane may be
 * acquired recursively. We lock on the first instance.
 *
 * In the case of a BTT instance on top of PMEM, we only acquire a lane
 * for the BTT metadata updates.
 */
unsigned int nd_region_acquire_lane(struct nd_region *nd_region)
{
	unsigned int cpu, lane;

	cpu = get_cpu();
	if (nd_region->num_lanes < nr_cpu_ids) {
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		lane = cpu % nd_region->num_lanes;
		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (ndl_count->count++ == 0)
			spin_lock(&ndl_lock->lock);
	} else
		lane = cpu;

	return lane;
}
EXPORT_SYMBOL(nd_region_acquire_lane);

void nd_region_release_lane(struct nd_region *nd_region, unsigned int lane)
{
	if (nd_region->num_lanes < nr_cpu_ids) {
		unsigned int cpu = get_cpu();
		struct nd_percpu_lane *ndl_lock, *ndl_count;

		ndl_count = per_cpu_ptr(nd_region->lane, cpu);
		ndl_lock = per_cpu_ptr(nd_region->lane, lane);
		if (--ndl_count->count == 0)
			spin_unlock(&ndl_lock->lock);
		put_cpu();
	}
	put_cpu();
}
EXPORT_SYMBOL(nd_region_release_lane);
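
/*
 * Usage sketch (assumed caller, modeled on the BTT/BLK drivers): an
 * acquire/release pair brackets each per-lane I/O, and the final
 * put_cpu() above balances the get_cpu() taken in
 * nd_region_acquire_lane():
 *
 *	unsigned int lane = nd_region_acquire_lane(nd_region);
 *
 *	... issue I/O through the lane's data window or log slot ...
 *
 *	nd_region_release_lane(nd_region, lane);
 */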

static struct nd_region *nd_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc, struct device_type *dev_type,
		const char *caller)
{
	struct nd_region *nd_region;
	struct device *dev;
	void *region_buf;
	unsigned int i;
	int ro = 0;

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		if ((mapping->start | mapping->size) % SZ_4K) {
			dev_err(&nvdimm_bus->dev, "%s: %s mapping%d is not 4K aligned\n",
					caller, dev_name(&nvdimm->dev), i);

			return NULL;
		}

		if (test_bit(NDD_UNARMED, &nvdimm->flags))
			ro = 1;
	}

	if (dev_type == &nd_blk_device_type) {
		struct nd_blk_region_desc *ndbr_desc;
		struct nd_blk_region *ndbr;

		ndbr_desc = to_blk_region_desc(ndr_desc);
		ndbr = kzalloc(sizeof(*ndbr) + sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		if (ndbr) {
			nd_region = &ndbr->nd_region;
			ndbr->enable = ndbr_desc->enable;
			ndbr->do_io = ndbr_desc->do_io;
		}
		region_buf = ndbr;
	} else {
		nd_region = kzalloc(sizeof(struct nd_region)
				+ sizeof(struct nd_mapping)
				* ndr_desc->num_mappings,
				GFP_KERNEL);
		region_buf = nd_region;
	}

	if (!region_buf)
		return NULL;
	nd_region->id = ida_simple_get(&region_ida, 0, 0, GFP_KERNEL);
	if (nd_region->id < 0)
		goto err_id;

	nd_region->lane = alloc_percpu(struct nd_percpu_lane);
	if (!nd_region->lane)
		goto err_percpu;

	for (i = 0; i < nr_cpu_ids; i++) {
		struct nd_percpu_lane *ndl;

		ndl = per_cpu_ptr(nd_region->lane, i);
		spin_lock_init(&ndl->lock);
		ndl->count = 0;
	}

	for (i = 0; i < ndr_desc->num_mappings; i++) {
		struct nd_mapping_desc *mapping = &ndr_desc->mapping[i];
		struct nvdimm *nvdimm = mapping->nvdimm;

		nd_region->mapping[i].nvdimm = nvdimm;
		nd_region->mapping[i].start = mapping->start;
		nd_region->mapping[i].size = mapping->size;
		nd_region->mapping[i].position = mapping->position;
		INIT_LIST_HEAD(&nd_region->mapping[i].labels);
		mutex_init(&nd_region->mapping[i].lock);

		get_device(&nvdimm->dev);
	}
	nd_region->ndr_mappings = ndr_desc->num_mappings;
	nd_region->provider_data = ndr_desc->provider_data;
	nd_region->nd_set = ndr_desc->nd_set;
	nd_region->num_lanes = ndr_desc->num_lanes;
	nd_region->flags = ndr_desc->flags;
	nd_region->ro = ro;
	nd_region->numa_node = ndr_desc->numa_node;
	ida_init(&nd_region->ns_ida);
	ida_init(&nd_region->btt_ida);
	ida_init(&nd_region->pfn_ida);
	ida_init(&nd_region->dax_ida);
	dev = &nd_region->dev;
	dev_set_name(dev, "region%d", nd_region->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = dev_type;
	dev->groups = ndr_desc->attr_groups;
	dev->of_node = ndr_desc->of_node;
	nd_region->ndr_size = resource_size(ndr_desc->res);
	nd_region->ndr_start = ndr_desc->res->start;
	nd_device_register(dev);

	return nd_region;

 err_percpu:
	ida_simple_remove(&region_ida, nd_region->id);
 err_id:
	kfree(region_buf);
	return NULL;
}

struct nd_region *nvdimm_pmem_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_pmem_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_pmem_region_create);

struct nd_region *nvdimm_blk_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	if (ndr_desc->num_mappings > 1)
		return NULL;
	ndr_desc->num_lanes = min(ndr_desc->num_lanes, ND_MAX_LANES);
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_blk_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_blk_region_create);

struct nd_region *nvdimm_volatile_region_create(struct nvdimm_bus *nvdimm_bus,
		struct nd_region_desc *ndr_desc)
{
	ndr_desc->num_lanes = ND_MAX_LANES;
	return nd_region_create(nvdimm_bus, ndr_desc, &nd_volatile_device_type,
			__func__);
}
EXPORT_SYMBOL_GPL(nvdimm_volatile_region_create);

/**
 * nvdimm_flush - flush any posted write queues between the cpu and pmem media
 * @nd_region: blk or interleaved pmem region
 */
void nvdimm_flush(struct nd_region *nd_region)
{
	struct nd_region_data *ndrd = dev_get_drvdata(&nd_region->dev);
	int i, idx;

	/*
	 * Try to encourage some diversity in flush hint addresses
	 * across cpus assuming a limited number of flush hints.
	 */
	idx = this_cpu_read(flush_idx);
	idx = this_cpu_add_return(flush_idx, hash_32(current->pid + idx, 8));

	/*
	 * The first wmb() is needed to 'sfence' all previous writes
	 * such that they are architecturally visible for the platform
	 * buffer flush. Note that we've already arranged for pmem
	 * writes to avoid the cache via memcpy_flushcache(). The final
	 * wmb() ensures ordering for the NVDIMM flush write.
	 */
	wmb();
	for (i = 0; i < nd_region->ndr_mappings; i++)
		if (ndrd_get_flush_wpq(ndrd, i, 0))
			writeq(1, ndrd_get_flush_wpq(ndrd, i, idx));
	wmb();
}
EXPORT_SYMBOL_GPL(nvdimm_flush);
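
/*
 * Usage sketch (mirroring the pmem driver pattern the comment above
 * references): persist a buffer by copying around the cpu cache and
 * then flushing the posted-write queues:
 *
 *	memcpy_flushcache(pmem_addr, buf, len);
 *	nvdimm_flush(nd_region);
 */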

/**
 * nvdimm_has_flush - determine write flushing requirements
 * @nd_region: blk or interleaved pmem region
 *
 * Returns 1 if writes require flushing
 * Returns 0 if writes do not require flushing
 * Returns -ENXIO if flushing capability can not be determined
 */
int nvdimm_has_flush(struct nd_region *nd_region)
{
	int i;

	/* no nvdimm or pmem api == flushing capability unknown */
	if (nd_region->ndr_mappings == 0
			|| !IS_ENABLED(CONFIG_ARCH_HAS_PMEM_API))
		return -ENXIO;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm *nvdimm = nd_mapping->nvdimm;

		/* flush hints present / available */
		if (nvdimm->num_flush)
			return 1;
	}

	/*
	 * The platform defines dimm devices without hints, assume a
	 * platform persistence mechanism like ADR
	 */
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_has_flush);
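
/*
 * Example caller (hypothetical): gate flush behavior on the tri-state
 * result, as region_visible() does for the deep_flush attribute:
 *
 *	int rc = nvdimm_has_flush(nd_region);
 *
 *	if (rc < 0)
 *		return rc;		// capability unknown
 *	if (rc > 0)
 *		nvdimm_flush(nd_region);	// hints require an explicit flush
 */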

int nvdimm_has_cache(struct nd_region *nd_region)
{
	return is_nd_pmem(&nd_region->dev) &&
		!test_bit(ND_REGION_PERSIST_CACHE, &nd_region->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_has_cache);

void __exit nd_region_devs_exit(void)
{
	ida_destroy(&region_ida);
}