// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright(c) 2013-2015 Intel Corporation. All rights reserved.
 */
#define pr_fmt(fmt) KBUILD_MODNAME ": " fmt
#include <linux/moduleparam.h>
#include <linux/vmalloc.h>
#include <linux/device.h>
#include <linux/ndctl.h>
#include <linux/slab.h>
#include <linux/io.h>
#include <linux/fs.h>
#include <linux/mm.h>
#include "nd-core.h"
#include "label.h"
#include "pmem.h"
#include "nd.h"

static DEFINE_IDA(dimm_ida);

static bool noblk;
module_param(noblk, bool, 0444);
MODULE_PARM_DESC(noblk, "force disable BLK / local alias support");
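/* e.g. boot with "libnvdimm.noblk=1" (assuming a libnvdimm module build) */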
/*
 * Retrieve the bus and dimm handle and check whether this bus supports
 * get_config_data commands.
 */
int nvdimm_check_config_data(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->cmd_mask ||
	    !test_bit(ND_CMD_GET_CONFIG_DATA, &nvdimm->cmd_mask)) {
		if (test_bit(NDD_LABELING, &nvdimm->flags))
			return -ENXIO;
		else
			return -ENOTTY;
	}

	return 0;
}

static int validate_dimm(struct nvdimm_drvdata *ndd)
{
	int rc;

	if (!ndd)
		return -EINVAL;

	rc = nvdimm_check_config_data(ndd->dev);
	if (rc)
		dev_dbg(ndd->dev, "%ps: %s error: %d\n",
				__builtin_return_address(0), __func__, rc);
	return rc;
}

/**
 * nvdimm_init_nsarea - determine the geometry of a dimm's namespace area
 * @ndd: dimm driver data to initialize
 */
int nvdimm_init_nsarea(struct nvdimm_drvdata *ndd)
{
	struct nd_cmd_get_config_size *cmd = &ndd->nsarea;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc;
	int rc = validate_dimm(ndd);
	int cmd_rc = 0;

	if (rc)
		return rc;

	if (cmd->config_size)
		return 0; /* already valid */

	memset(cmd, 0, sizeof(*cmd));
	nd_desc = nvdimm_bus->nd_desc;
	rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
			ND_CMD_GET_CONFIG_SIZE, cmd, sizeof(*cmd), &cmd_rc);
	if (rc < 0)
		return rc;
	return cmd_rc;
}
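
/*
 * Minimal usage sketch (hedged; the exact probe flow lives in the dimm
 * driver, not here): size the label area, then read it back:
 *
 *	rc = nvdimm_init_nsarea(ndd);
 *	if (rc == 0)
 *		rc = nvdimm_get_config_data(ndd, ndd->data, 0,
 *					    ndd->nsarea.config_size);
 */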

int nvdimm_get_config_data(struct nvdimm_drvdata *ndd, void *buf,
			   size_t offset, size_t len)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nd_cmd_get_config_data_hdr *cmd;
	size_t max_cmd_size, buf_offset;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

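	/*
	 * The config area can be larger than the controller's max
	 * transfer size, so read it in max_xfer-sized chunks.
	 */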
	for (buf_offset = 0; len;
	     len -= cmd->in_length, buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);

		cmd_size = sizeof(*cmd) + cmd->in_length;

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_GET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}

		/* out_buf should be valid, copy it into our output buffer */
		memcpy(buf + buf_offset, cmd->out_buf, cmd->in_length);
	}
	kvfree(cmd);

	return rc;
}

int nvdimm_set_config_data(struct nvdimm_drvdata *ndd, size_t offset,
		void *buf, size_t len)
{
	size_t max_cmd_size, buf_offset;
	struct nd_cmd_set_config_hdr *cmd;
	int rc = validate_dimm(ndd), cmd_rc = 0;
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;

	if (rc)
		return rc;

	if (offset + len > ndd->nsarea.config_size)
		return -ENXIO;

	max_cmd_size = min_t(u32, len, ndd->nsarea.max_xfer);
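	/* room for the header, one max_xfer payload, and a status dword */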
	cmd = kvzalloc(max_cmd_size + sizeof(*cmd) + sizeof(u32), GFP_KERNEL);
	if (!cmd)
		return -ENOMEM;

	for (buf_offset = 0; len; len -= cmd->in_length,
			buf_offset += cmd->in_length) {
		size_t cmd_size;

		cmd->in_offset = offset + buf_offset;
		cmd->in_length = min(max_cmd_size, len);
		memcpy(cmd->in_buf, buf + buf_offset, cmd->in_length);

		/* status is output in the last 4 bytes of the command buffer */
		cmd_size = sizeof(*cmd) + cmd->in_length + sizeof(u32);

		rc = nd_desc->ndctl(nd_desc, to_nvdimm(ndd->dev),
				ND_CMD_SET_CONFIG_DATA, cmd, cmd_size, &cmd_rc);
		if (rc < 0)
			break;
		if (cmd_rc < 0) {
			rc = cmd_rc;
			break;
		}
	}
	kvfree(cmd);

	return rc;
}

void nvdimm_set_labeling(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LABELING, &nvdimm->flags);
}

void nvdimm_set_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	set_bit(NDD_LOCKED, &nvdimm->flags);
}

void nvdimm_clear_locked(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	clear_bit(NDD_LOCKED, &nvdimm->flags);
}

static void nvdimm_release(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	ida_simple_remove(&dimm_ida, nvdimm->id);
	kfree(nvdimm);
}

struct nvdimm *to_nvdimm(struct device *dev)
{
	struct nvdimm *nvdimm = container_of(dev, struct nvdimm, dev);

	WARN_ON(!is_nvdimm(dev));
	return nvdimm;
}
EXPORT_SYMBOL_GPL(to_nvdimm);

struct nvdimm *nd_blk_region_to_dimm(struct nd_blk_region *ndbr)
{
	struct nd_region *nd_region = &ndbr->nd_region;
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];

	return nd_mapping->nvdimm;
}
EXPORT_SYMBOL_GPL(nd_blk_region_to_dimm);

unsigned long nd_blk_memremap_flags(struct nd_blk_region *ndbr)
{
	/* pmem mapping properties are private to libnvdimm */
	return ARCH_MEMREMAP_PMEM;
}
EXPORT_SYMBOL_GPL(nd_blk_memremap_flags);

struct nvdimm_drvdata *to_ndd(struct nd_mapping *nd_mapping)
{
	struct nvdimm *nvdimm = nd_mapping->nvdimm;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	return dev_get_drvdata(&nvdimm->dev);
}
EXPORT_SYMBOL(to_ndd);

void nvdimm_drvdata_release(struct kref *kref)
{
	struct nvdimm_drvdata *ndd = container_of(kref, typeof(*ndd), kref);
	struct device *dev = ndd->dev;
	struct resource *res, *_r;

	dev_dbg(dev, "trace\n");
	nvdimm_bus_lock(dev);
	for_each_dpa_resource_safe(ndd, res, _r)
		nvdimm_free_dpa(ndd, res);
	nvdimm_bus_unlock(dev);

	kvfree(ndd->data);
	kfree(ndd);
	put_device(dev);
}

void get_ndd(struct nvdimm_drvdata *ndd)
{
	kref_get(&ndd->kref);
}

void put_ndd(struct nvdimm_drvdata *ndd)
{
	if (ndd)
		kref_put(&ndd->kref, nvdimm_drvdata_release);
}

const char *nvdimm_name(struct nvdimm *nvdimm)
{
	return dev_name(&nvdimm->dev);
}
EXPORT_SYMBOL_GPL(nvdimm_name);

struct kobject *nvdimm_kobj(struct nvdimm *nvdimm)
{
	return &nvdimm->dev.kobj;
}
EXPORT_SYMBOL_GPL(nvdimm_kobj);

unsigned long nvdimm_cmd_mask(struct nvdimm *nvdimm)
{
	return nvdimm->cmd_mask;
}
EXPORT_SYMBOL_GPL(nvdimm_cmd_mask);

void *nvdimm_provider_data(struct nvdimm *nvdimm)
{
	if (nvdimm)
		return nvdimm->provider_data;
	return NULL;
}
EXPORT_SYMBOL_GPL(nvdimm_provider_data);

static ssize_t commands_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	int cmd, len = 0;

	if (!nvdimm->cmd_mask)
		return sprintf(buf, "\n");

	for_each_set_bit(cmd, &nvdimm->cmd_mask, BITS_PER_LONG)
		len += sprintf(buf + len, "%s ", nvdimm_cmd_name(cmd));
	len += sprintf(buf + len, "\n");
	return len;
}
static DEVICE_ATTR_RO(commands);

static ssize_t flags_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%s%s%s\n",
			test_bit(NDD_ALIASING, &nvdimm->flags) ? "alias " : "",
			test_bit(NDD_LABELING, &nvdimm->flags) ? "label " : "",
			test_bit(NDD_LOCKED, &nvdimm->flags) ? "lock " : "");
}
static DEVICE_ATTR_RO(flags);

static ssize_t state_show(struct device *dev, struct device_attribute *attr,
		char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	/*
	 * The state may be in the process of changing; userspace should
	 * quiesce probing if it wants a static answer.
	 */
	nvdimm_bus_lock(dev);
	nvdimm_bus_unlock(dev);
	return sprintf(buf, "%s\n", atomic_read(&nvdimm->busy)
			? "active" : "idle");
}
static DEVICE_ATTR_RO(state);

static ssize_t available_slots_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm_drvdata *ndd = dev_get_drvdata(dev);
	ssize_t rc;
	u32 nfree;

	if (!ndd)
		return -ENXIO;

	nvdimm_bus_lock(dev);
	nfree = nd_label_nfree(ndd);
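	/*
	 * Report one less than the raw free count; nfree is unsigned, so
	 * the "nfree - 1 > nfree" test catches wraparound when the count
	 * is already zero.
	 */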
	if (nfree - 1 > nfree) {
		dev_WARN_ONCE(dev, 1, "we ate our last label?\n");
		nfree = 0;
	} else
		nfree--;
	rc = sprintf(buf, "%d\n", nfree);
	nvdimm_bus_unlock(dev);
	return rc;
}
static DEVICE_ATTR_RO(available_slots);

__weak ssize_t security_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (test_bit(NVDIMM_SECURITY_OVERWRITE, &nvdimm->sec.flags))
		return sprintf(buf, "overwrite\n");
	if (test_bit(NVDIMM_SECURITY_DISABLED, &nvdimm->sec.flags))
		return sprintf(buf, "disabled\n");
	if (test_bit(NVDIMM_SECURITY_UNLOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "unlocked\n");
	if (test_bit(NVDIMM_SECURITY_LOCKED, &nvdimm->sec.flags))
		return sprintf(buf, "locked\n");
	return -ENOTTY;
}

static ssize_t frozen_show(struct device *dev,
		struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	return sprintf(buf, "%d\n", test_bit(NVDIMM_SECURITY_FROZEN,
				&nvdimm->sec.flags));
}
static DEVICE_ATTR_RO(frozen);

static ssize_t security_store(struct device *dev,
		struct device_attribute *attr, const char *buf, size_t len)
{
	ssize_t rc;

	/*
	 * Require all userspace triggered security management to be
	 * done while probing is idle and the DIMM is not in active use
	 * in any region.
	 */
	nd_device_lock(dev);
	nvdimm_bus_lock(dev);
	wait_nvdimm_bus_probe_idle(dev);
	rc = nvdimm_security_store(dev, buf, len);
	nvdimm_bus_unlock(dev);
	nd_device_unlock(dev);

	return rc;
}
static DEVICE_ATTR_RW(security);

static struct attribute *nvdimm_attributes[] = {
	&dev_attr_state.attr,
	&dev_attr_flags.attr,
	&dev_attr_commands.attr,
	&dev_attr_available_slots.attr,
	&dev_attr_security.attr,
	&dev_attr_frozen.attr,
	NULL,
};

static umode_t nvdimm_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (a != &dev_attr_security.attr && a != &dev_attr_frozen.attr)
		return a->mode;
	if (!nvdimm->sec.flags)
		return 0;

	if (a == &dev_attr_security.attr) {
		/* Are there any state mutation ops (make writable)? */
		if (nvdimm->sec.ops->freeze || nvdimm->sec.ops->disable
				|| nvdimm->sec.ops->change_key
				|| nvdimm->sec.ops->erase
				|| nvdimm->sec.ops->overwrite)
			return a->mode;
		return 0444;
	}

	if (nvdimm->sec.ops->freeze)
		return a->mode;
	return 0;
}

static const struct attribute_group nvdimm_attribute_group = {
	.attrs = nvdimm_attributes,
	.is_visible = nvdimm_visible,
};

static ssize_t result_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_result result;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	result = nvdimm->fw_ops->activate_result(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (result) {
	case NVDIMM_FWA_RESULT_NONE:
		return sprintf(buf, "none\n");
	case NVDIMM_FWA_RESULT_SUCCESS:
		return sprintf(buf, "success\n");
	case NVDIMM_FWA_RESULT_FAIL:
		return sprintf(buf, "fail\n");
	case NVDIMM_FWA_RESULT_NOTSTAGED:
		return sprintf(buf, "not_staged\n");
	case NVDIMM_FWA_RESULT_NEEDRESET:
		return sprintf(buf, "need_reset\n");
	default:
		return -ENXIO;
	}
}
static DEVICE_ATTR_ADMIN_RO(result);

static ssize_t activate_show(struct device *dev, struct device_attribute *attr, char *buf)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_state state;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	nvdimm_bus_lock(dev);
	state = nvdimm->fw_ops->activate_state(nvdimm);
	nvdimm_bus_unlock(dev);

	switch (state) {
	case NVDIMM_FWA_IDLE:
		return sprintf(buf, "idle\n");
	case NVDIMM_FWA_BUSY:
		return sprintf(buf, "busy\n");
	case NVDIMM_FWA_ARMED:
		return sprintf(buf, "armed\n");
	default:
		return -ENXIO;
	}
}

static ssize_t activate_store(struct device *dev, struct device_attribute *attr,
		const char *buf, size_t len)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_trigger arg;
	int rc;

	if (!nvdimm->fw_ops)
		return -EOPNOTSUPP;

	if (sysfs_streq(buf, "arm"))
		arg = NVDIMM_FWA_ARM;
	else if (sysfs_streq(buf, "disarm"))
		arg = NVDIMM_FWA_DISARM;
	else
		return -EINVAL;

	nvdimm_bus_lock(dev);
	rc = nvdimm->fw_ops->arm(nvdimm, arg);
	nvdimm_bus_unlock(dev);

	if (rc < 0)
		return rc;
	return len;
}
static DEVICE_ATTR_ADMIN_RW(activate);
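
/*
 * Example interaction (a sketch; assumes the standard sysfs layout for
 * nmem devices and a bus provider that implements fw_ops):
 *
 *	# echo arm > /sys/bus/nd/devices/nmem0/firmware/activate
 *	# cat /sys/bus/nd/devices/nmem0/firmware/result
 */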

static struct attribute *nvdimm_firmware_attributes[] = {
	&dev_attr_activate.attr,
	&dev_attr_result.attr,
	NULL,
};

static umode_t nvdimm_firmware_visible(struct kobject *kobj, struct attribute *a, int n)
{
	struct device *dev = container_of(kobj, typeof(*dev), kobj);
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(dev);
	struct nvdimm_bus_descriptor *nd_desc = nvdimm_bus->nd_desc;
	struct nvdimm *nvdimm = to_nvdimm(dev);
	enum nvdimm_fwa_capability cap;

	if (!nd_desc->fw_ops)
		return 0;
	if (!nvdimm->fw_ops)
		return 0;

	nvdimm_bus_lock(dev);
	cap = nd_desc->fw_ops->capability(nd_desc);
	nvdimm_bus_unlock(dev);

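	/* hide the group unless the bus can at least quiesce for activation */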
	if (cap < NVDIMM_FWA_CAP_QUIESCE)
		return 0;

	return a->mode;
}

static const struct attribute_group nvdimm_firmware_attribute_group = {
	.name = "firmware",
	.attrs = nvdimm_firmware_attributes,
	.is_visible = nvdimm_firmware_visible,
};

static const struct attribute_group *nvdimm_attribute_groups[] = {
	&nd_device_attribute_group,
	&nvdimm_attribute_group,
	&nvdimm_firmware_attribute_group,
	NULL,
};

static const struct device_type nvdimm_device_type = {
	.name = "nvdimm",
	.release = nvdimm_release,
	.groups = nvdimm_attribute_groups,
};

bool is_nvdimm(struct device *dev)
{
	return dev->type == &nvdimm_device_type;
}

struct nvdimm *__nvdimm_create(struct nvdimm_bus *nvdimm_bus,
		void *provider_data, const struct attribute_group **groups,
		unsigned long flags, unsigned long cmd_mask, int num_flush,
		struct resource *flush_wpq, const char *dimm_id,
		const struct nvdimm_security_ops *sec_ops,
		const struct nvdimm_fw_ops *fw_ops)
{
	struct nvdimm *nvdimm = kzalloc(sizeof(*nvdimm), GFP_KERNEL);
	struct device *dev;

	if (!nvdimm)
		return NULL;

	nvdimm->id = ida_simple_get(&dimm_ida, 0, 0, GFP_KERNEL);
	if (nvdimm->id < 0) {
		kfree(nvdimm);
		return NULL;
	}

	nvdimm->dimm_id = dimm_id;
	nvdimm->provider_data = provider_data;
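	/* honor the module-level "noblk" override: force-disable BLK alias */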
	if (noblk)
		flags |= 1 << NDD_NOBLK;
	nvdimm->flags = flags;
	nvdimm->cmd_mask = cmd_mask;
	nvdimm->num_flush = num_flush;
	nvdimm->flush_wpq = flush_wpq;
	atomic_set(&nvdimm->busy, 0);
	dev = &nvdimm->dev;
	dev_set_name(dev, "nmem%d", nvdimm->id);
	dev->parent = &nvdimm_bus->dev;
	dev->type = &nvdimm_device_type;
	dev->devt = MKDEV(nvdimm_major, nvdimm->id);
	dev->groups = groups;
	nvdimm->sec.ops = sec_ops;
	nvdimm->fw_ops = fw_ops;
	nvdimm->sec.overwrite_tmo = 0;
	INIT_DELAYED_WORK(&nvdimm->dwork, nvdimm_security_overwrite_query);
	/*
	 * Security state must be initialized before device_add() for
	 * attribute visibility.
	 */
	/* get security state and extended (master) state */
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);
	nvdimm->sec.ext_flags = nvdimm_security_flags(nvdimm, NVDIMM_MASTER);
	nd_device_register(dev);

	return nvdimm;
}
EXPORT_SYMBOL_GPL(__nvdimm_create);

static void shutdown_security_notify(void *data)
{
	struct nvdimm *nvdimm = data;

	sysfs_put(nvdimm->sec.overwrite_state);
}

int nvdimm_security_setup_events(struct device *dev)
{
	struct nvdimm *nvdimm = to_nvdimm(dev);

	if (!nvdimm->sec.flags || !nvdimm->sec.ops
			|| !nvdimm->sec.ops->overwrite)
		return 0;
	nvdimm->sec.overwrite_state = sysfs_get_dirent(dev->kobj.sd, "security");
	if (!nvdimm->sec.overwrite_state)
		return -ENOMEM;

	return devm_add_action_or_reset(dev, shutdown_security_notify, nvdimm);
}
EXPORT_SYMBOL_GPL(nvdimm_security_setup_events);

int nvdimm_in_overwrite(struct nvdimm *nvdimm)
{
	return test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags);
}
EXPORT_SYMBOL_GPL(nvdimm_in_overwrite);

int nvdimm_security_freeze(struct nvdimm *nvdimm)
{
	int rc;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(&nvdimm->dev));

	if (!nvdimm->sec.ops || !nvdimm->sec.ops->freeze)
		return -EOPNOTSUPP;

	if (!nvdimm->sec.flags)
		return -EIO;

	if (test_bit(NDD_SECURITY_OVERWRITE, &nvdimm->flags)) {
		dev_warn(&nvdimm->dev, "Overwrite operation in progress.\n");
		return -EBUSY;
	}

	rc = nvdimm->sec.ops->freeze(nvdimm);
	nvdimm->sec.flags = nvdimm_security_flags(nvdimm, NVDIMM_USER);

	return rc;
}

static unsigned long dpa_align(struct nd_region *nd_region)
{
	struct device *dev = &nd_region->dev;

	if (dev_WARN_ONCE(dev, !is_nvdimm_bus_locked(dev),
				"bus lock required for capacity provision\n"))
		return 0;
	if (dev_WARN_ONCE(dev, !nd_region->ndr_mappings || nd_region->align
				% nd_region->ndr_mappings,
				"invalid region align %#lx mappings: %d\n",
				nd_region->align, nd_region->ndr_mappings))
		return 0;
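	/* e.g. a 2-way interleaved region with 2M align yields 1M per dimm */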
	return nd_region->align / nd_region->ndr_mappings;
}

int alias_dpa_busy(struct device *dev, void *data)
{
	resource_size_t map_end, blk_start, new;
	struct blk_alloc_info *info = data;
	struct nd_mapping *nd_mapping;
	struct nd_region *nd_region;
	struct nvdimm_drvdata *ndd;
	struct resource *res;
	unsigned long align;
	int i;

	if (!is_memory(dev))
		return 0;

	nd_region = to_nd_region(dev);
	for (i = 0; i < nd_region->ndr_mappings; i++) {
		nd_mapping = &nd_region->mapping[i];
		if (nd_mapping->nvdimm == info->nd_mapping->nvdimm)
			break;
	}

	if (i >= nd_region->ndr_mappings)
		return 0;

	ndd = to_ndd(nd_mapping);
	map_end = nd_mapping->start + nd_mapping->size - 1;
	blk_start = nd_mapping->start;

	/*
	 * In the allocation case ->res is set to free space that we are
	 * looking to validate against PMEM aliasing collision rules
	 * (i.e. BLK is allocated after all aliased PMEM).
	 */
	if (info->res) {
		if (info->res->start >= nd_mapping->start
				&& info->res->start < map_end)
			/* pass */;
		else
			return 0;
	}

 retry:
	/*
	 * Find the free dpa from the end of the last pmem allocation to
	 * the end of the interleave-set mapping.
	 */
	align = dpa_align(nd_region);
	if (!align)
		return 0;

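	/*
	 * Rescan from the top whenever a pmem allocation bumps blk_start,
	 * so every resource is re-checked against the enlarged window.
	 */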
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strncmp(res->name, "pmem", 4) != 0)
			continue;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if ((start >= blk_start && start < map_end)
				|| (end >= blk_start && end <= map_end)) {
			new = max(blk_start, min(map_end, end) + 1);
			if (new != blk_start) {
				blk_start = new;
				goto retry;
			}
		}
	}

	/* update the free space range with the probed blk_start */
	if (info->res && blk_start > info->res->start) {
		info->res->start = max(info->res->start, blk_start);
		if (info->res->start > info->res->end)
			info->res->end = info->res->start - 1;
		return 1;
	}

	info->available -= blk_start - nd_mapping->start;

	return 0;
}

/**
 * nd_blk_available_dpa - account the unused dpa of BLK region
 * @nd_region: region to scan for unused (BLK-capable) dpa
 *
 * Unlike PMEM, BLK namespaces can occupy discontiguous DPA ranges, but
 * we arrange for them to never start at a lower dpa than the last
 * PMEM allocation in an aliased region.
 */
resource_size_t nd_blk_available_dpa(struct nd_region *nd_region)
{
	struct nvdimm_bus *nvdimm_bus = walk_to_nvdimm_bus(&nd_region->dev);
	struct nd_mapping *nd_mapping = &nd_region->mapping[0];
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct blk_alloc_info info = {
		.nd_mapping = nd_mapping,
		.available = nd_mapping->size,
		.res = NULL,
	};
	struct resource *res;
	unsigned long align;

	if (!ndd)
		return 0;

	device_for_each_child(&nvdimm_bus->dev, &info, alias_dpa_busy);

	/* now account for busy blk allocations in unaliased dpa */
	align = dpa_align(nd_region);
	if (!align)
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end, size;

		if (strncmp(res->name, "blk", 3) != 0)
			continue;
		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		size = end - start + 1;
		if (size >= info.available)
			return 0;
		info.available -= size;
	}

	return info.available;
}

/**
 * nd_pmem_max_contiguous_dpa - For the given dimm+region, return the max
 *			   contiguous unallocated dpa range.
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 */
resource_size_t nd_pmem_max_contiguous_dpa(struct nd_region *nd_region,
					   struct nd_mapping *nd_mapping)
{
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct nvdimm_bus *nvdimm_bus;
	resource_size_t max = 0;
	struct resource *res;
	unsigned long align;

	/* if a dimm is disabled the available capacity is zero */
	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	nvdimm_bus = walk_to_nvdimm_bus(ndd->dev);
	if (__reserve_free_pmem(&nd_region->dev, nd_mapping->nvdimm))
		return 0;
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		if (strcmp(res->name, "pmem-reserve") != 0)
			continue;
		/* trim free space relative to current alignment setting */
		start = ALIGN(res->start, align);
		end = ALIGN_DOWN(res->end + 1, align) - 1;
		if (end < start)
			continue;
		if (end - start + 1 > max)
			max = end - start + 1;
	}
	release_free_pmem(nvdimm_bus, nd_mapping);
	return max;
}

/**
 * nd_pmem_available_dpa - for the given dimm+region account unallocated dpa
 * @nd_region: constrain available space check to this reference region
 * @nd_mapping: container of dpa-resource-root + labels
 * @overlap: calculate available space assuming this level of overlap
 *
 * Validate that a PMEM label, if present, aligns with the start of an
 * interleave set and truncate the available size at the lowest BLK
 * overlap point.
 *
 * The expectation is that this routine is called multiple times as it
 * probes for the largest BLK encroachment for any single member DIMM of
 * the interleave set.  Once that value is determined the PMEM-limit for
 * the set can be established.
 */
resource_size_t nd_pmem_available_dpa(struct nd_region *nd_region,
		struct nd_mapping *nd_mapping, resource_size_t *overlap)
{
	resource_size_t map_start, map_end, busy = 0, available, blk_start;
	struct nvdimm_drvdata *ndd = to_ndd(nd_mapping);
	struct resource *res;
	const char *reason;
	unsigned long align;

	if (!ndd)
		return 0;

	align = dpa_align(nd_region);
	if (!align)
		return 0;

	map_start = nd_mapping->start;
	map_end = map_start + nd_mapping->size - 1;
	blk_start = max(map_start, map_end + 1 - *overlap);
	for_each_dpa_resource(ndd, res) {
		resource_size_t start, end;

		start = ALIGN_DOWN(res->start, align);
		end = ALIGN(res->end + 1, align) - 1;
		if (start >= map_start && start < map_end) {
			if (strncmp(res->name, "blk", 3) == 0)
				blk_start = min(blk_start,
						max(map_start, start));
			else if (end > map_end) {
				reason = "misaligned to iset";
				goto err;
			} else
				busy += end - start + 1;
		} else if (end >= map_start && end <= map_end) {
			if (strncmp(res->name, "blk", 3) == 0) {
				/*
				 * If a BLK allocation overlaps the start of
				 * PMEM the entire interleave set may now only
				 * be used for BLK.
				 */
				blk_start = map_start;
			} else
				busy += end - start + 1;
		} else if (map_start > start && map_start < end) {
			/* total eclipse of the mapping */
			busy += nd_mapping->size;
			blk_start = map_start;
		}
	}

	*overlap = map_end + 1 - blk_start;
	available = blk_start - map_start;
	if (busy < available)
		return ALIGN_DOWN(available - busy, align);
	return 0;

 err:
	nd_dbg_dpa(nd_region, ndd, res, "%s\n", reason);
	return 0;
}
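
/*
 * Worked example with hypothetical numbers (alignment effects ignored,
 * *overlap entering as 0): for a 1G mapping at dpa 0 holding a 256M
 * "pmem" allocation at the start and a "blk" allocation from 768M to
 * the end, blk_start settles at 768M, busy is 256M, *overlap becomes
 * 256M, and the function returns 768M - 256M = 512M.
 */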

void nvdimm_free_dpa(struct nvdimm_drvdata *ndd, struct resource *res)
{
	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
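	/* res->name was kmemdup()'d in nvdimm_allocate_dpa(); free it here */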
	kfree(res->name);
	__release_region(&ndd->dpa, res->start, resource_size(res));
}

struct resource *nvdimm_allocate_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id, resource_size_t start,
		resource_size_t n)
{
	char *name = kmemdup(label_id, sizeof(*label_id), GFP_KERNEL);
	struct resource *res;

	if (!name)
		return NULL;

	WARN_ON_ONCE(!is_nvdimm_bus_locked(ndd->dev));
	res = __request_region(&ndd->dpa, start, n, name, 0);
	if (!res)
		kfree(name);
	return res;
}

/**
 * nvdimm_allocated_dpa - sum up the dpa currently allocated to this label_id
 * @ndd: container of dpa-resource-root + labels
 * @label_id: dpa resource name of the form {pmem|blk}-<human readable uuid>
 */
resource_size_t nvdimm_allocated_dpa(struct nvdimm_drvdata *ndd,
		struct nd_label_id *label_id)
{
	resource_size_t allocated = 0;
	struct resource *res;

	for_each_dpa_resource(ndd, res)
		if (strcmp(res->name, label_id->id) == 0)
			allocated += resource_size(res);

	return allocated;
}

static int count_dimms(struct device *dev, void *c)
{
	int *count = c;

	if (is_nvdimm(dev))
		(*count)++;
	return 0;
}

int nvdimm_bus_check_dimm_count(struct nvdimm_bus *nvdimm_bus, int dimm_count)
{
	int count = 0;
	/* Flush any possible dimm registration failures */
	nd_synchronize();

	device_for_each_child(&nvdimm_bus->dev, &count, count_dimms);
	dev_dbg(&nvdimm_bus->dev, "count: %d\n", count);
	if (count != dimm_count)
		return -ENXIO;
	return 0;
}
EXPORT_SYMBOL_GPL(nvdimm_bus_check_dimm_count);

void __exit nvdimm_devs_exit(void)
{
	ida_destroy(&dimm_ida);
}