// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/uuid.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/nd.h>
#include "nd-core.h"
#include "label.h"
#include "nd.h"

static guid_t nvdimm_btt_guid;
static guid_t nvdimm_btt2_guid;
static guid_t nvdimm_pfn_guid;
static guid_t nvdimm_dax_guid;

static const char NSINDEX_SIGNATURE[] = "NAMESPACE_INDEX\0";

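/*
 * Index sequence numbers are two bits wide (NSINDEX_SEQ_MASK) and cycle
 * 1 -> 2 -> 3 -> 1, with 0 reserved as invalid.  best_seq() returns the
 * more recent of two sequence numbers: the cyclic successor wins, e.g.
 * best_seq(3, 1) == 1 because nd_inc_seq(3) == 1.
 */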
static u32 best_seq(u32 a, u32 b)
{
	a &= NSINDEX_SEQ_MASK;
	b &= NSINDEX_SEQ_MASK;

	if (a == 0 || a == b)
		return b;
	else if (b == 0)
		return a;
	else if (nd_inc_seq(a) == b)
		return b;
	else
		return a;
}

unsigned sizeof_namespace_label(struct nvdimm_drvdata *ndd)
{
	return ndd->nslabel_size;
}

static size_t __sizeof_namespace_index(u32 nslot)
{
	return ALIGN(sizeof(struct nd_namespace_index) + DIV_ROUND_UP(nslot, 8),
			NSINDEX_ALIGN);
}

static int __nvdimm_num_label_slots(struct nvdimm_drvdata *ndd,
		size_t index_size)
{
	return (ndd->nsarea.config_size - index_size * 2) /
			sizeof_namespace_label(ndd);
}

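/*
 * Derive the slot count in two steps: first over-estimate it by assuming
 * the whole config area holds labels, then size the index blocks from that
 * estimate and recompute how many labels fit in the remaining space.  For
 * example (illustrative numbers): a 128KiB label area with 256-byte labels
 * gives an estimate of 512 slots, whose index header plus 64-byte free
 * bitmap rounds up to one 256-byte index block, leaving
 * (128KiB - 2 * 256) / 256 = 510 usable slots.
 */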
int nvdimm_num_label_slots(struct nvdimm_drvdata *ndd)
{
	u32 tmp_nslot, n;

	tmp_nslot = ndd->nsarea.config_size / sizeof_namespace_label(ndd);
	n = __sizeof_namespace_index(tmp_nslot) / NSINDEX_ALIGN;

	return __nvdimm_num_label_slots(ndd, NSINDEX_ALIGN * n);
}

size_t sizeof_namespace_index(struct nvdimm_drvdata *ndd)
{
	u32 nslot, space, size;

	/*
	 * Per UEFI 2.7, the minimum size of the Label Storage Area is large
	 * enough to hold 2 index blocks and 2 labels.  The minimum index
	 * block size is 256 bytes. The label size is 128 for namespaces
	 * prior to version 1.2 and at minimum 256 for version 1.2 and later.
	 */
	nslot = nvdimm_num_label_slots(ndd);
	space = ndd->nsarea.config_size - nslot * sizeof_namespace_label(ndd);
	size = __sizeof_namespace_index(nslot) * 2;
	if (size <= space && nslot >= 2)
		return size / 2;

	dev_err(ndd->dev, "label area (%d) too small to host (%d byte) labels\n",
			ndd->nsarea.config_size, sizeof_namespace_label(ndd));
	return 0;
}

static int __nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * On media label format consists of two index blocks followed
	 * by an array of labels.  None of these structures are ever
	 * updated in place.  A sequence number tracks the current
	 * active index and the next one to write, while labels are
	 * written to free slots.
	 *
	 *     +------------+
	 *     |            |
	 *     |  nsindex0  |
	 *     |            |
	 *     +------------+
	 *     |            |
	 *     |  nsindex1  |
	 *     |            |
	 *     +------------+
	 *     |   label0   |
	 *     +------------+
	 *     |   label1   |
	 *     +------------+
	 *     |            |
	 *      ....nslot...
	 *     |            |
	 *     +------------+
	 *     |   labelN   |
	 *     +------------+
	 */
	struct nd_namespace_index *nsindex[] = {
		to_namespace_index(ndd, 0),
		to_namespace_index(ndd, 1),
	};
	const int num_index = ARRAY_SIZE(nsindex);
	struct device *dev = ndd->dev;
	bool valid[2] = { 0 };
	int i, num_valid = 0;
	u32 seq;

	for (i = 0; i < num_index; i++) {
		u32 nslot;
		u8 sig[NSINDEX_SIG_LEN];
		u64 sum_save, sum, size;
		unsigned int version, labelsize;

		memcpy(sig, nsindex[i]->sig, NSINDEX_SIG_LEN);
		if (memcmp(sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN) != 0) {
			dev_dbg(dev, "nsindex%d signature invalid\n", i);
			continue;
		}

		/* label sizes larger than 128 arrived with v1.2 */
		version = __le16_to_cpu(nsindex[i]->major) * 100
			+ __le16_to_cpu(nsindex[i]->minor);
		if (version >= 102)
			labelsize = 1 << (7 + nsindex[i]->labelsize);
		else
			labelsize = 128;

		if (labelsize != sizeof_namespace_label(ndd)) {
			dev_dbg(dev, "nsindex%d labelsize %d invalid\n",
					i, nsindex[i]->labelsize);
			continue;
		}

		sum_save = __le64_to_cpu(nsindex[i]->checksum);
		nsindex[i]->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nsindex[i], sizeof_namespace_index(ndd), 1);
		nsindex[i]->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(dev, "nsindex%d checksum invalid\n", i);
			continue;
		}

		seq = __le32_to_cpu(nsindex[i]->seq);
		if ((seq & NSINDEX_SEQ_MASK) == 0) {
			dev_dbg(dev, "nsindex%d sequence: %#x invalid\n", i, seq);
			continue;
		}

		/* sanity check the index against expected values */
		if (__le64_to_cpu(nsindex[i]->myoff)
				!= i * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d myoff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->myoff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->otheroff)
				!= (!i) * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d otheroff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->otheroff));
			continue;
		}
		if (__le64_to_cpu(nsindex[i]->labeloff)
				!= 2 * sizeof_namespace_index(ndd)) {
			dev_dbg(dev, "nsindex%d labeloff: %#llx invalid\n",
					i, (unsigned long long)
					__le64_to_cpu(nsindex[i]->labeloff));
			continue;
		}

		size = __le64_to_cpu(nsindex[i]->mysize);
		if (size > sizeof_namespace_index(ndd)
				|| size < sizeof(struct nd_namespace_index)) {
			dev_dbg(dev, "nsindex%d mysize: %#llx invalid\n", i, size);
			continue;
		}

		nslot = __le32_to_cpu(nsindex[i]->nslot);
		if (nslot * sizeof_namespace_label(ndd)
				+ 2 * sizeof_namespace_index(ndd)
				> ndd->nsarea.config_size) {
			dev_dbg(dev, "nsindex%d nslot: %u invalid, config_size: %#x\n",
					i, nslot, ndd->nsarea.config_size);
			continue;
		}
		valid[i] = true;
		num_valid++;
	}

	switch (num_valid) {
	case 0:
		break;
	case 1:
		for (i = 0; i < num_index; i++)
			if (valid[i])
				return i;
		/* can't have num_valid > 0 but valid[] = { false, false } */
		WARN_ON(1);
		break;
	default:
		/* pick the best index... */
		seq = best_seq(__le32_to_cpu(nsindex[0]->seq),
				__le32_to_cpu(nsindex[1]->seq));
		if (seq == (__le32_to_cpu(nsindex[1]->seq) & NSINDEX_SEQ_MASK))
			return 1;
		else
			return 0;
		break;
	}

	return -1;
}

static int nd_label_validate(struct nvdimm_drvdata *ndd)
{
	/*
	 * In order to probe for and validate namespace index blocks we
	 * need to know the size of the labels, and we can't trust the
	 * size of the labels until we validate the index blocks.
	 * Resolve this dependency loop by probing for known label
	 * sizes, but default to v1.2 256-byte namespace labels if
	 * discovery fails.
	 */
	int label_size[] = { 128, 256 };
	int i, rc;

	for (i = 0; i < ARRAY_SIZE(label_size); i++) {
		ndd->nslabel_size = label_size[i];
		rc = __nd_label_validate(ndd);
		if (rc >= 0)
			return rc;
	}

	return -1;
}

static void nd_label_copy(struct nvdimm_drvdata *ndd,
			  struct nd_namespace_index *dst,
			  struct nd_namespace_index *src)
{
	/* just exit if either destination or source is NULL */
	if (!dst || !src)
		return;

	memcpy(dst, src, sizeof_namespace_index(ndd));
}

static struct nd_namespace_label *nd_label_base(struct nvdimm_drvdata *ndd)
{
	void *base = to_namespace_index(ndd, 0);

	return base + 2 * sizeof_namespace_index(ndd);
}

static int to_slot(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	unsigned long label, base;

	label = (unsigned long) nd_label;
	base = (unsigned long) nd_label_base(ndd);

	return (label - base) / sizeof_namespace_label(ndd);
}

static struct nd_namespace_label *to_label(struct nvdimm_drvdata *ndd, int slot)
{
	unsigned long label, base;

	base = (unsigned long) nd_label_base(ndd);
	label = base + sizeof_namespace_label(ndd) * slot;

	return (struct nd_namespace_label *) label;
}

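/*
 * In the on-media free bitmap a set bit marks a free slot and a clear bit
 * marks a slot that holds (or may hold) an active label, so iterating the
 * clear bits visits every candidate label; each one must still pass
 * slot_valid() before it is trusted.
 */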
#define for_each_clear_bit_le(bit, addr, size) \
	for ((bit) = find_next_zero_bit_le((addr), (size), 0);  \
	     (bit) < (size);                                    \
	     (bit) = find_next_zero_bit_le((addr), (size), (bit) + 1))

/**
 * preamble_index - common variable initialization for nd_label_* routines
 * @ndd: dimm container for the relevant label set
 * @idx: namespace_index index
 * @nsindex_out: on return set to the currently active namespace index
 * @free: on return set to the free label bitmap in the index
 * @nslot: on return set to the number of slots in the label space
 */
static bool preamble_index(struct nvdimm_drvdata *ndd, int idx,
		struct nd_namespace_index **nsindex_out,
		unsigned long **free, u32 *nslot)
{
	struct nd_namespace_index *nsindex;

	nsindex = to_namespace_index(ndd, idx);
	if (nsindex == NULL)
		return false;

	*free = (unsigned long *) nsindex->free;
	*nslot = __le32_to_cpu(nsindex->nslot);
	*nsindex_out = nsindex;

	return true;
}

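/*
 * Build the "pmem-<uuid>" / "blk-<uuid>" id string.  The same string is
 * used to name the DPA resource reserved for the namespace, which is why
 * label code can match labels to resources with a strcmp() on res->name.
 */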
char *nd_label_gen_id(struct nd_label_id *label_id, u8 *uuid, u32 flags)
{
	if (!label_id || !uuid)
		return NULL;
	snprintf(label_id->id, ND_LABEL_ID_SIZE, "%s-%pUb",
			flags & NSLABEL_FLAG_LOCAL ? "blk" : "pmem", uuid);
	return label_id->id;
}

static bool preamble_current(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_current, nsindex,
			free, nslot);
}

static bool preamble_next(struct nvdimm_drvdata *ndd,
		struct nd_namespace_index **nsindex,
		unsigned long **free, u32 *nslot)
{
	return preamble_index(ndd, ndd->ns_next, nsindex,
			free, nslot);
}

static bool slot_valid(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label, u32 slot)
{
	/* check that we are written where we expect to be written */
	if (slot != __le32_to_cpu(nd_label->slot))
		return false;

	/* check checksum */
	if (namespace_label_has(ndd, checksum)) {
		u64 sum, sum_save;

		sum_save = __le64_to_cpu(nd_label->checksum);
		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum_save);
		if (sum != sum_save) {
			dev_dbg(ndd->dev, "fail checksum. slot: %d expect: %#llx\n",
				slot, sum);
			return false;
		}
	}

	return true;
}

int nd_label_reserve_dpa(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0; /* no label, nothing to reserve */

	for_each_clear_bit_le(slot, free, nslot) {
		struct nvdimm *nvdimm = to_nvdimm(ndd->dev);
		struct nd_namespace_label *nd_label;
		struct nd_region *nd_region = NULL;
		u8 label_uuid[NSLABEL_UUID_LEN];
		struct nd_label_id label_id;
		struct resource *res;
		u32 flags;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot))
			continue;

		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		flags = __le32_to_cpu(nd_label->flags);
		if (test_bit(NDD_NOBLK, &nvdimm->flags))
			flags &= ~NSLABEL_FLAG_LOCAL;
		nd_label_gen_id(&label_id, label_uuid, flags);
		res = nvdimm_allocate_dpa(ndd, &label_id,
				__le64_to_cpu(nd_label->dpa),
				__le64_to_cpu(nd_label->rawsize));
		nd_dbg_dpa(nd_region, ndd, res, "reserve\n");
		if (!res)
			return -EBUSY;
	}

	return 0;
}

int nd_label_data_init(struct nvdimm_drvdata *ndd)
{
	size_t config_size, read_size, max_xfer, offset;
	struct nd_namespace_index *nsindex;
	unsigned int i;
	int rc = 0;
	u32 nslot;

	if (ndd->data)
		return 0;

	if (ndd->nsarea.status || ndd->nsarea.max_xfer == 0) {
		dev_dbg(ndd->dev, "failed to init config data area: (%u:%u)\n",
			ndd->nsarea.max_xfer, ndd->nsarea.config_size);
		return -ENXIO;
	}

	/*
	 * We need to determine the maximum index area as this is the section
	 * we must read and validate before we can start processing labels.
	 *
	 * If the area is too small to contain the two indexes and 2 labels
	 * then we abort.
	 *
	 * Start at a label size of 128 as this should result in the largest
	 * possible namespace index size.
	 */
	ndd->nslabel_size = 128;
	read_size = sizeof_namespace_index(ndd) * 2;
	if (!read_size)
		return -ENXIO;

	/* Allocate config data */
	config_size = ndd->nsarea.config_size;
	ndd->data = kvzalloc(config_size, GFP_KERNEL);
	if (!ndd->data)
		return -ENOMEM;

	/*
	 * We want to guarantee as few reads as possible while conserving
	 * memory. To do that we figure out how much unused space will be left
	 * in the last read, divide that by the total number of reads it is
	 * going to take given our maximum transfer size, and then reduce our
	 * maximum transfer size based on that result.
	 */
	max_xfer = min_t(size_t, ndd->nsarea.max_xfer, config_size);
	if (read_size < max_xfer) {
		/* trim waste */
		max_xfer -= ((max_xfer - 1) - (config_size - 1) % max_xfer) /
			    DIV_ROUND_UP(config_size, max_xfer);
		/* make certain we read indexes in exactly 1 read */
		if (max_xfer < read_size)
			max_xfer = read_size;
	}
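	/*
	 * Note that trimming max_xfer by waste / number-of-reads cannot
	 * increase the number of reads: nreads * (max_xfer - waste / nreads)
	 * still covers config_size, it only evens out the transfer sizes.
	 */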

	/* Make our initial read size a multiple of max_xfer size */
	read_size = min(DIV_ROUND_UP(read_size, max_xfer) * max_xfer,
			config_size);

	/* Read the index data */
	rc = nvdimm_get_config_data(ndd, ndd->data, 0, read_size);
	if (rc)
		goto out_err;

	/* Validate index data, if not valid assume all labels are invalid */
	ndd->ns_current = nd_label_validate(ndd);
	if (ndd->ns_current < 0)
		return 0;

	/* Record our index values */
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_current);

	/* Copy "current" index on top of the "next" index */
	nsindex = to_current_namespace_index(ndd);
	nd_label_copy(ndd, to_next_namespace_index(ndd), nsindex);

	/* Determine starting offset for label data */
	offset = __le64_to_cpu(nsindex->labeloff);
	nslot = __le32_to_cpu(nsindex->nslot);

	/* Loop through the free list pulling in any active labels */
	for (i = 0; i < nslot; i++, offset += ndd->nslabel_size) {
		size_t label_read_size;

		/* zero out the unused labels */
		if (test_bit_le(i, nsindex->free)) {
			memset(ndd->data + offset, 0, ndd->nslabel_size);
			continue;
		}

		/* if we already read past here then just continue */
		if (offset + ndd->nslabel_size <= read_size)
			continue;

		/* if we haven't read in a while reset our read_size offset */
		if (read_size < offset)
			read_size = offset;

		/* determine how much more will be read after this next call. */
		label_read_size = offset + ndd->nslabel_size - read_size;
		label_read_size = DIV_ROUND_UP(label_read_size, max_xfer) *
				  max_xfer;

		/* truncate last read if needed */
		if (read_size + label_read_size > config_size)
			label_read_size = config_size - read_size;

		/* Read the label data */
		rc = nvdimm_get_config_data(ndd, ndd->data + read_size,
					    read_size, label_read_size);
		if (rc)
			goto out_err;

		/* push read_size to next read offset */
		read_size += label_read_size;
	}

	dev_dbg(ndd->dev, "len: %zu rc: %d\n", offset, rc);
out_err:
	return rc;
}

int nd_label_active_count(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;
	int count = 0;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return 0;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);

		if (!slot_valid(ndd, nd_label, slot)) {
			u32 label_slot = __le32_to_cpu(nd_label->slot);
			u64 size = __le64_to_cpu(nd_label->rawsize);
			u64 dpa = __le64_to_cpu(nd_label->dpa);

			dev_dbg(ndd->dev,
				"slot%d invalid slot: %d dpa: %llx size: %llx\n",
					slot, label_slot, dpa, size);
			continue;
		}
		count++;
	}
	return count;
}

struct nd_namespace_label *nd_label_active(struct nvdimm_drvdata *ndd, int n)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_current(ndd, &nsindex, &free, &nslot))
		return NULL;

	for_each_clear_bit_le(slot, free, nslot) {
		struct nd_namespace_label *nd_label;

		nd_label = to_label(ndd, slot);
		if (!slot_valid(ndd, nd_label, slot))
			continue;

		if (n-- == 0)
			return to_label(ndd, slot);
	}

	return NULL;
}

u32 nd_label_alloc_slot(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return UINT_MAX;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	slot = find_next_bit_le(free, nslot, 0);
	if (slot == nslot)
		return UINT_MAX;

	clear_bit_le(slot, free);

	return slot;
}

bool nd_label_free_slot(struct nvdimm_drvdata *ndd, u32 slot)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return false;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (slot < nslot)
		return !test_and_set_bit_le(slot, free);
	return false;
}

u32 nd_label_nfree(struct nvdimm_drvdata *ndd)
{
	struct nd_namespace_index *nsindex;
	unsigned long *free;
	u32 nslot;

	WARN_ON(!is_nvdimm_bus_locked(ndd->dev));

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return nvdimm_num_label_slots(ndd);

	return bitmap_weight(free, nslot);
}

static int nd_label_write_index(struct nvdimm_drvdata *ndd, int index, u32 seq,
		unsigned long flags)
{
	struct nd_namespace_index *nsindex;
	unsigned long offset;
	u64 checksum;
	u32 nslot;
	int rc;

	nsindex = to_namespace_index(ndd, index);
	if (flags & ND_NSINDEX_INIT)
		nslot = nvdimm_num_label_slots(ndd);
	else
		nslot = __le32_to_cpu(nsindex->nslot);

	memcpy(nsindex->sig, NSINDEX_SIGNATURE, NSINDEX_SIG_LEN);
	memset(&nsindex->flags, 0, 3);
	nsindex->labelsize = sizeof_namespace_label(ndd) >> 8;
	nsindex->seq = __cpu_to_le32(seq);
	offset = (unsigned long) nsindex
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->myoff = __cpu_to_le64(offset);
	nsindex->mysize = __cpu_to_le64(sizeof_namespace_index(ndd));
	offset = (unsigned long) to_namespace_index(ndd,
			nd_label_next_nsindex(index))
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->otheroff = __cpu_to_le64(offset);
	offset = (unsigned long) nd_label_base(ndd)
		- (unsigned long) to_namespace_index(ndd, 0);
	nsindex->labeloff = __cpu_to_le64(offset);
	nsindex->nslot = __cpu_to_le32(nslot);
	nsindex->major = __cpu_to_le16(1);
	if (sizeof_namespace_label(ndd) < 256)
		nsindex->minor = __cpu_to_le16(1);
	else
		nsindex->minor = __cpu_to_le16(2);
	nsindex->checksum = __cpu_to_le64(0);
	if (flags & ND_NSINDEX_INIT) {
		unsigned long *free = (unsigned long *) nsindex->free;
		u32 nfree = ALIGN(nslot, BITS_PER_LONG);
		int last_bits, i;

		memset(nsindex->free, 0xff, nfree / 8);
		for (i = 0, last_bits = nfree - nslot; i < last_bits; i++)
			clear_bit_le(nslot + i, free);
	}
	checksum = nd_fletcher64(nsindex, sizeof_namespace_index(ndd), 1);
	nsindex->checksum = __cpu_to_le64(checksum);
	rc = nvdimm_set_config_data(ndd, __le64_to_cpu(nsindex->myoff),
			nsindex, sizeof_namespace_index(ndd));
	if (rc < 0)
		return rc;

	if (flags & ND_NSINDEX_INIT)
		return 0;

	/* copy the index we just wrote to the new 'next' */
	WARN_ON(index != ndd->ns_next);
	nd_label_copy(ndd, to_current_namespace_index(ndd), nsindex);
	ndd->ns_current = nd_label_next_nsindex(ndd->ns_current);
	ndd->ns_next = nd_label_next_nsindex(ndd->ns_next);
	WARN_ON(ndd->ns_current == ndd->ns_next);

	return 0;
}

static unsigned long nd_label_offset(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	return (unsigned long) nd_label
		- (unsigned long) to_namespace_index(ndd, 0);
}

enum nvdimm_claim_class to_nvdimm_cclass(guid_t *guid)
{
	if (guid_equal(guid, &nvdimm_btt_guid))
		return NVDIMM_CCLASS_BTT;
	else if (guid_equal(guid, &nvdimm_btt2_guid))
		return NVDIMM_CCLASS_BTT2;
	else if (guid_equal(guid, &nvdimm_pfn_guid))
		return NVDIMM_CCLASS_PFN;
	else if (guid_equal(guid, &nvdimm_dax_guid))
		return NVDIMM_CCLASS_DAX;
	else if (guid_equal(guid, &guid_null))
		return NVDIMM_CCLASS_NONE;

	return NVDIMM_CCLASS_UNKNOWN;
}

static const guid_t *to_abstraction_guid(enum nvdimm_claim_class claim_class,
	guid_t *target)
{
	if (claim_class == NVDIMM_CCLASS_BTT)
		return &nvdimm_btt_guid;
	else if (claim_class == NVDIMM_CCLASS_BTT2)
		return &nvdimm_btt2_guid;
	else if (claim_class == NVDIMM_CCLASS_PFN)
		return &nvdimm_pfn_guid;
	else if (claim_class == NVDIMM_CCLASS_DAX)
		return &nvdimm_dax_guid;
	else if (claim_class == NVDIMM_CCLASS_UNKNOWN) {
		/*
		 * If we're modifying a namespace for which we don't
		 * know the claim_class, don't touch the existing guid.
		 */
		return target;
	} else
		return &guid_null;
}

static void reap_victim(struct nd_mapping *nd_mapping,
		struct nd_label_ent *victim)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	u32 slot = to_slot(ndd, victim->label);

	dev_dbg(ndd->dev, "free: %d\n", slot);
	nd_label_free_slot(ndd, slot);
	victim->label = NULL;
}

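/*
 * Write an updated pmem label for this mapping: stage the new label in a
 * free slot, mark any prior label carrying the same uuid for garbage
 * collection, then write out the 'next' index so the staged label becomes
 * part of the current, active label set.
 */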
static int __pmem_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_pmem *nspm,
		int pos, unsigned long flags)
{
	struct nd_namespace_common *ndns = &nspm->nsio.common;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_namespace_index *nsindex;
	struct nd_label_ent *label_ent;
	struct nd_label_id label_id;
	struct resource *res;
	unsigned long *free;
	u32 nslot, slot;
	size_t offset;
	u64 cookie;
	int rc;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	cookie = nd_region_interleave_set_cookie(nd_region, nsindex);
	nd_label_gen_id(&label_id, nspm->uuid, 0);
	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id.id) == 0)
			break;

	if (!res) {
		WARN_ON_ONCE(1);
		return -ENXIO;
	}

	/* allocate and write the label to the staging (next) index */
	slot = nd_label_alloc_slot(ndd);
	if (slot == UINT_MAX)
		return -ENXIO;
	dev_dbg(ndd->dev, "allocated: %d\n", slot);

	nd_label = to_label(ndd, slot);
	memset(nd_label, 0, sizeof_namespace_label(ndd));
	memcpy(nd_label->uuid, nspm->uuid, NSLABEL_UUID_LEN);
	if (nspm->alt_name)
		memcpy(nd_label->name, nspm->alt_name, NSLABEL_NAME_LEN);
	nd_label->flags = __cpu_to_le32(flags);
	nd_label->nlabel = __cpu_to_le16(nd_region->ndr_mappings);
	nd_label->position = __cpu_to_le16(pos);
	nd_label->isetcookie = __cpu_to_le64(cookie);
	nd_label->rawsize = __cpu_to_le64(resource_size(res));
	nd_label->lbasize = __cpu_to_le64(nspm->lbasize);
	nd_label->dpa = __cpu_to_le64(res->start);
	nd_label->slot = __cpu_to_le32(slot);
	if (namespace_label_has(ndd, type_guid))
		guid_copy(&nd_label->type_guid, &nd_set->type_guid);
	if (namespace_label_has(ndd, abstraction_guid))
		guid_copy(&nd_label->abstraction_guid,
				to_abstraction_guid(ndns->claim_class,
					&nd_label->abstraction_guid));
	if (namespace_label_has(ndd, checksum)) {
		u64 sum;

		nd_label->checksum = __cpu_to_le64(0);
		sum = nd_fletcher64(nd_label, sizeof_namespace_label(ndd), 1);
		nd_label->checksum = __cpu_to_le64(sum);
	}
	nd_dbg_dpa(nd_region, ndd, res, "\n");

	/* update label */
	offset = nd_label_offset(ndd, nd_label);
	rc = nvdimm_set_config_data(ndd, offset, nd_label,
			sizeof_namespace_label(ndd));
	if (rc < 0)
		return rc;

	/* Garbage collect the previous label */
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list) {
		if (!label_ent->label)
			continue;
		if (test_and_clear_bit(ND_LABEL_REAP, &label_ent->flags)
				|| memcmp(nspm->uuid, label_ent->label->uuid,
					NSLABEL_UUID_LEN) == 0)
			reap_victim(nd_mapping, label_ent);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc == 0) {
		list_for_each_entry(label_ent, &nd_mapping->labels, list)
			if (!label_ent->label) {
				label_ent->label = nd_label;
				nd_label = NULL;
				break;
			}
		dev_WARN_ONCE(&nspm->nsio.common.dev, nd_label,
				"failed to track label: %d\n",
				to_slot(ndd, nd_label));
		if (nd_label)
			rc = -ENXIO;
	}
	mutex_unlock(&nd_mapping->lock);

	return rc;
}

static bool is_old_resource(struct resource *res, struct resource **list, int n)
{
	int i;

	if (res->flags & DPA_RESOURCE_ADJUSTED)
		return false;
	for (i = 0; i < n; i++)
		if (res == list[i])
			return true;
	return false;
}

static struct resource *to_resource(struct nvdimm_drvdata *ndd,
		struct nd_namespace_label *nd_label)
{
	struct resource *res;

	for_each_dpa_resource(ndd, res) {
		if (res->start != __le64_to_cpu(nd_label->dpa))
			continue;
		if (resource_size(res) != __le64_to_cpu(nd_label->rawsize))
			continue;
		return res;
	}

	return NULL;
}

/*
 * 1/ Account all the labels that can be freed after this update
 * 2/ Allocate and write the label to the staging (next) index
 * 3/ Record the resources in the namespace device
 */
static int __blk_label_update(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, struct nd_namespace_blk *nsblk,
		int num_labels)
{
	int i, alloc, victims, nfree, old_num_resources, nlabel, rc = -ENXIO;
	struct nd_interleave_set *nd_set = nd_region->nd_set;
	struct nd_namespace_common *ndns = &nsblk->common;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_namespace_label *nd_label;
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	unsigned long *free, *victim_map = NULL;
	struct resource *res, **old_res_list;
	struct nd_label_id label_id;
	u8 uuid[NSLABEL_UUID_LEN];
	int min_dpa_idx = 0;
	LIST_HEAD(list);
	u32 nslot, slot;

	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return -ENXIO;

	old_res_list = nsblk->res;
	nfree = nd_label_nfree(ndd);
	old_num_resources = nsblk->num_resources;
	nd_label_gen_id(&label_id, nsblk->uuid, NSLABEL_FLAG_LOCAL);

	/*
	 * We need to loop over the old resources a few times, which seems a
	 * bit inefficient, but we need to know that we have the label
	 * space before we start mutating the tracking structures.
	 * Otherwise the recovery method of last resort for userspace is
	 * disable and re-enable the parent region.
	 */
	alloc = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!is_old_resource(res, old_res_list, old_num_resources))
			alloc++;
	}

	victims = 0;
	if (old_num_resources) {
		/* convert old local-label-map to dimm-slot victim-map */
		victim_map = bitmap_zalloc(nslot, GFP_KERNEL);
		if (!victim_map)
			return -ENOMEM;

		/* mark unused labels for garbage collection */
		for_each_clear_bit_le(slot, free, nslot) {
			nd_label = to_label(ndd, slot);
			memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
			if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
				continue;
			res = to_resource(ndd, nd_label);
			if (res && is_old_resource(res, old_res_list,
						old_num_resources))
				continue;
			slot = to_slot(ndd, nd_label);
			set_bit(slot, victim_map);
			victims++;
		}
	}

	/* don't allow updates that consume the last label */
	if (nfree - alloc < 0 || nfree - alloc + victims < 1) {
		dev_info(&nsblk->common.dev, "insufficient label space\n");
		bitmap_free(victim_map);
		return -ENOSPC;
	}
	/* from here on we need to abort on error */


	/* assign all resources to the namespace before writing the labels */
	nsblk->res = NULL;
	nsblk->num_resources = 0;
	for_each_dpa_resource(ndd, res) {
		if (strcmp(res->name, label_id.id) != 0)
			continue;
		if (!nsblk_add_resource(nd_region, ndd, nsblk, res->start)) {
			rc = -ENOMEM;
			goto abort;
		}
	}

	/*
	 * Find the resource associated with the first label in the set
	 * per the v1.2 namespace specification.
	 */
	for (i = 0; i < nsblk->num_resources; i++) {
		struct resource *min = nsblk->res[min_dpa_idx];

		res = nsblk->res[i];
		if (res->start < min->start)
			min_dpa_idx = i;
	}

	for (i = 0; i < nsblk->num_resources; i++) {
		size_t offset;

		res = nsblk->res[i];
		if (is_old_resource(res, old_res_list, old_num_resources))
			continue; /* carry-over */
		slot = nd_label_alloc_slot(ndd);
		if (slot == UINT_MAX)
			goto abort;
		dev_dbg(ndd->dev, "allocated: %d\n", slot);

		nd_label = to_label(ndd, slot);
		memset(nd_label, 0, sizeof_namespace_label(ndd));
		memcpy(nd_label->uuid, nsblk->uuid, NSLABEL_UUID_LEN);
		if (nsblk->alt_name)
			memcpy(nd_label->name, nsblk->alt_name,
					NSLABEL_NAME_LEN);
		nd_label->flags = __cpu_to_le32(NSLABEL_FLAG_LOCAL);

		/*
		 * Use the presence of the type_guid as a flag to
		 * determine isetcookie usage and nlabel + position
		 * policy for blk-aperture namespaces.
		 */
		if (namespace_label_has(ndd, type_guid)) {
			if (i == min_dpa_idx) {
				nd_label->nlabel = __cpu_to_le16(nsblk->num_resources);
				nd_label->position = __cpu_to_le16(0);
			} else {
				nd_label->nlabel = __cpu_to_le16(0xffff);
				nd_label->position = __cpu_to_le16(0xffff);
			}
			nd_label->isetcookie = __cpu_to_le64(nd_set->cookie2);
		} else {
			nd_label->nlabel = __cpu_to_le16(0); /* N/A */
			nd_label->position = __cpu_to_le16(0); /* N/A */
			nd_label->isetcookie = __cpu_to_le64(0); /* N/A */
		}

		nd_label->dpa = __cpu_to_le64(res->start);
		nd_label->rawsize = __cpu_to_le64(resource_size(res));
		nd_label->lbasize = __cpu_to_le64(nsblk->lbasize);
		nd_label->slot = __cpu_to_le32(slot);
		if (namespace_label_has(ndd, type_guid))
			guid_copy(&nd_label->type_guid, &nd_set->type_guid);
		if (namespace_label_has(ndd, abstraction_guid))
			guid_copy(&nd_label->abstraction_guid,
					to_abstraction_guid(ndns->claim_class,
						&nd_label->abstraction_guid));

		if (namespace_label_has(ndd, checksum)) {
			u64 sum;

			nd_label->checksum = __cpu_to_le64(0);
			sum = nd_fletcher64(nd_label,
					sizeof_namespace_label(ndd), 1);
			nd_label->checksum = __cpu_to_le64(sum);
		}

		/* update label */
		offset = nd_label_offset(ndd, nd_label);
		rc = nvdimm_set_config_data(ndd, offset, nd_label,
				sizeof_namespace_label(ndd));
		if (rc < 0)
			goto abort;
	}

	/* free up now unused slots in the new index */
	for_each_set_bit(slot, victim_map, victim_map ? nslot : 0) {
		dev_dbg(ndd->dev, "free: %d\n", slot);
		nd_label_free_slot(ndd, slot);
	}

	/* update index */
	rc = nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
	if (rc)
		goto abort;

	/*
	 * Now that the on-dimm labels are up to date, fix up the tracking
	 * entries in nd_mapping->labels
	 */
	nlabel = 0;
	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		nd_label = label_ent->label;
		if (!nd_label)
			continue;
		nlabel++;
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		nlabel--;
		list_move(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);
	mutex_unlock(&nd_mapping->lock);

	if (nlabel + nsblk->num_resources > num_labels) {
		/*
		 * Bug, we can't end up with more resources than
		 * available labels
		 */
		WARN_ON_ONCE(1);
		rc = -ENXIO;
		goto out;
	}

	mutex_lock(&nd_mapping->lock);
	label_ent = list_first_entry_or_null(&nd_mapping->labels,
			typeof(*label_ent), list);
	if (!label_ent) {
		WARN_ON(1);
		mutex_unlock(&nd_mapping->lock);
		rc = -ENXIO;
		goto out;
	}
	for_each_clear_bit_le(slot, free, nslot) {
		nd_label = to_label(ndd, slot);
		memcpy(uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(uuid, nsblk->uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		res = to_resource(ndd, nd_label);
		res->flags &= ~DPA_RESOURCE_ADJUSTED;
		dev_vdbg(&nsblk->common.dev, "assign label slot: %d\n", slot);
		list_for_each_entry_from(label_ent, &nd_mapping->labels, list) {
			if (label_ent->label)
				continue;
			label_ent->label = nd_label;
			nd_label = NULL;
			break;
		}
		if (nd_label)
			dev_WARN(&nsblk->common.dev,
					"failed to track label slot%d\n", slot);
	}
	mutex_unlock(&nd_mapping->lock);

 out:
	kfree(old_res_list);
	bitmap_free(victim_map);
	return rc;

 abort:
	/*
	 * 1/ repair the allocated label bitmap in the index
	 * 2/ restore the resource list
	 */
	nd_label_copy(ndd, nsindex, to_current_namespace_index(ndd));
	kfree(nsblk->res);
	nsblk->res = old_res_list;
	nsblk->num_resources = old_num_resources;
	old_res_list = NULL;
	goto out;
}

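/*
 * Ensure at least num_labels tracking entries exist for this mapping and,
 * if the DIMM has no valid index blocks yet, initialize both of them:
 * index0 is seeded with sequence number 3 and index1 with 2, so index0 is
 * selected as 'current' and index1 as 'next' for the first update.
 */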
static int init_labels(struct nd_mapping *nd_mapping, int num_labels)
{
	int i, old_num_labels = 0;
	struct nd_label_ent *label_ent;
	struct nd_namespace_index *nsindex;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry(label_ent, &nd_mapping->labels, list)
		old_num_labels++;
	mutex_unlock(&nd_mapping->lock);

	/*
	 * We need to preserve all the old labels for the mapping so
	 * they can be garbage collected after writing the new labels.
	 */
	for (i = old_num_labels; i < num_labels; i++) {
		label_ent = kzalloc(sizeof(*label_ent), GFP_KERNEL);
		if (!label_ent)
			return -ENOMEM;
		mutex_lock(&nd_mapping->lock);
		list_add_tail(&label_ent->list, &nd_mapping->labels);
		mutex_unlock(&nd_mapping->lock);
	}

	if (ndd->ns_current == -1 || ndd->ns_next == -1)
		/* pass */;
	else
		return max(num_labels, old_num_labels);

	nsindex = to_namespace_index(ndd, 0);
	memset(nsindex, 0, ndd->nsarea.config_size);
	for (i = 0; i < 2; i++) {
		int rc = nd_label_write_index(ndd, i, 3 - i, ND_NSINDEX_INIT);

		if (rc)
			return rc;
	}
	ndd->ns_next = 1;
	ndd->ns_current = 0;

	return max(num_labels, old_num_labels);
}

static int del_labels(struct nd_mapping *nd_mapping, u8 *uuid)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nd_label_ent *label_ent, *e;
	struct nd_namespace_index *nsindex;
	u8 label_uuid[NSLABEL_UUID_LEN];
	unsigned long *free;
	LIST_HEAD(list);
	u32 nslot, slot;
	int active = 0;

	if (!uuid)
		return 0;

	/* no index || no labels == nothing to delete */
	if (!preamble_next(ndd, &nsindex, &free, &nslot))
		return 0;

	mutex_lock(&nd_mapping->lock);
	list_for_each_entry_safe(label_ent, e, &nd_mapping->labels, list) {
		struct nd_namespace_label *nd_label = label_ent->label;

		if (!nd_label)
			continue;
		active++;
		memcpy(label_uuid, nd_label->uuid, NSLABEL_UUID_LEN);
		if (memcmp(label_uuid, uuid, NSLABEL_UUID_LEN) != 0)
			continue;
		active--;
		slot = to_slot(ndd, nd_label);
		nd_label_free_slot(ndd, slot);
		dev_dbg(ndd->dev, "free: %d\n", slot);
		list_move_tail(&label_ent->list, &list);
		label_ent->label = NULL;
	}
	list_splice_tail_init(&list, &nd_mapping->labels);

	if (active == 0) {
		nd_mapping_free_labels(nd_mapping);
		dev_dbg(ndd->dev, "no more active labels\n");
	}
	mutex_unlock(&nd_mapping->lock);

	return nd_label_write_index(ndd, ndd->ns_next,
			nd_inc_seq(__le32_to_cpu(nsindex->seq)), 0);
}

int nd_pmem_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_pmem *nspm, resource_size_t size)
{
	int i, rc;

	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];
		struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
		struct resource *res;
		int count = 0;

		if (size == 0) {
			rc = del_labels(nd_mapping, nspm->uuid);
			if (rc)
				return rc;
			continue;
		}

		for_each_dpa_resource(ndd, res)
			if (strncmp(res->name, "pmem", 4) == 0)
				count++;
		WARN_ON_ONCE(!count);

		rc = init_labels(nd_mapping, count);
		if (rc < 0)
			return rc;

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i,
				NSLABEL_FLAG_UPDATING);
		if (rc)
			return rc;
	}

	if (size == 0)
		return 0;

	/* Clear the UPDATING flag per UEFI 2.7 expectations */
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		struct nd_mapping *nd_mapping = &nd_region->mapping[i];

		rc = __pmem_label_update(nd_region, nd_mapping, nspm, i, 0);
		if (rc)
			return rc;
	}

	return 0;
}

int nd_blk_namespace_label_update(struct nd_region *nd_region,
		struct nd_namespace_blk *nsblk, resource_size_t size)
{
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct resource *res;
	int count = 0;

	if (size == 0)
		return del_labels(nd_mapping, nsblk->uuid);

	for_each_dpa_resource(to_ndd(nd_mapping), res)
		count++;

	count = init_labels(nd_mapping, count);
	if (count < 0)
		return count;

	return __blk_label_update(nd_region, nd_mapping, nsblk, count);
}

int __init nd_label_init(void)
{
	WARN_ON(guid_parse(NVDIMM_BTT_GUID, &nvdimm_btt_guid));
	WARN_ON(guid_parse(NVDIMM_BTT2_GUID, &nvdimm_btt2_guid));
	WARN_ON(guid_parse(NVDIMM_PFN_GUID, &nvdimm_pfn_guid));
	WARN_ON(guid_parse(NVDIMM_DAX_GUID, &nvdimm_dax_guid));

	return 0;
}